Updated cert-manager to automatically create a ServiceAccount #136

Open · wants to merge 1 commit into base: master
4 changes: 4 additions & 0 deletions addons/cert-manager/data.tf
@@ -0,0 +1,4 @@
data "aws_eks_cluster" "eks_cluster" {
# this makes downstream resources wait for data plane to be ready
name = var.eks_cluster_name
}
54 changes: 54 additions & 0 deletions addons/cert-manager/main.tf
@@ -4,4 +4,58 @@ module "helm_addon" {
  manage_via_gitops = var.manage_via_gitops
  helm_config       = local.helm_config
  addon_context     = var.addon_context

  set_values = [
    {
      name  = "serviceAccount.create"
      value = "false"
    },
    {
      name  = "serviceAccount.name"
      value = "${local.name}-sa"
    }
  ]

  # -- IRSA Configurations
  irsa_config = {
    irsa_iam_policies                 = [aws_iam_policy.policy.arn]
    irsa_iam_role_name                = "${local.name}-${var.eks_cluster_name}"
    create_kubernetes_service_account = true
    kubernetes_service_account        = "${local.name}-sa"
    kubernetes_namespace              = local.default_helm_config.namespace
    # OIDC issuer URL with the https:// scheme stripped
    eks_oidc_provider_arn             = replace(data.aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer, "https://", "")
    account_id                        = var.account_id
  }
}
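
For context, serviceAccount.create is set to "false" because the irsa_config block asks the addon framework to create the ServiceAccount itself and bind it to the IRSA role. Roughly, the object it is expected to produce looks like the sketch below; the resource label and the role-ARN format are assumptions for illustration, not part of this change.

resource "kubernetes_service_account" "cert_manager_irsa_sketch" {
  metadata {
    name      = "${local.name}-sa"
    namespace = local.default_helm_config.namespace
    annotations = {
      # IRSA annotation that lets cert-manager pods assume the Route53 role
      "eks.amazonaws.com/role-arn" = "arn:aws:iam::${var.account_id}:role/${local.name}-${var.eks_cluster_name}"
    }
  }
}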

resource "aws_iam_policy" "policy" {
name = "${local.name}-${var.eks_cluster_name}"
path = "/"
description = "IAM Policy used by ${local.name}-${var.eks_cluster_name} IAM Role"
policy = var.iampolicy_json_content != null ? var.iampolicy_json_content : <<-EOT
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets"
],
"Resource": ${jsonencode(length(try(var.certification_manager_extra_configs.hosted_zone_ids, [])) > 0 ? [for id in var.certification_manager_extra_configs.hosted_zone_ids : "arn:aws:route53:::hostedzone/${id}"] : ["arn:aws:route53:::hostedzone/*"])}
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones",
"route53:ListResourceRecordSets",
"route53:ListTagsForResource"
],
"Resource": [
"*"
]
}
]
}
EOT
}
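
The Route53 statement narrows ChangeResourceRecordSets to specific hosted zones when certification_manager_extra_configs.hosted_zone_ids is provided, and falls back to all hosted zones otherwise. A hedged example of passing the zone IDs from a calling module is sketched below; the module label, source path, and zone ID are placeholders.

module "addons" {
  source = "../.."

  # ... other addon inputs ...

  certification_manager_extra_configs = {
    hosted_zone_ids = ["Z0123456789EXAMPLE"]
  }
}
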
16 changes: 16 additions & 0 deletions addons/cert-manager/variables.tf
@@ -29,4 +29,20 @@ variable "certification_manager_extra_configs" {
description = "Override attributes of helm_release terraform resource"
type = any
default = {}
}

variable "eks_cluster_name" {
type = string
default = ""
}

variable "account_id" {
type = string
default = ""
}

variable "iampolicy_json_content" {
description = "Custom IAM Policy for cert manager IRSA"
type = string
default = null
}
101 changes: 51 additions & 50 deletions examples/complete/main.tf
@@ -33,76 +33,77 @@ module "vpc" {
# AWS EKS
###############################################################################


module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "20.33.1"

cluster_name = "${local.name}-cluster"
cluster_version = local.cluster_version
cluster_endpoint_public_access = true
# cluster_endpoint_private_access = true

cluster_ip_family = "ipv4"

# Set this to true if AmazonEKS_CNI_IPv6_Policy policy is not available
create_cni_ipv6_iam_policy = false

cluster_addons = {
vpc-cni = {
most_recent = true
before_compute = true
configuration_values = jsonencode({
env = {
ENABLE_PREFIX_DELEGATION = "true"
WARM_PREFIX_TARGET = "1"
}
})
}
}
source = "clouddrove/eks/aws"
version = "1.4.2"
enabled = true


name = local.name
kubernetes_version = "1.31"
endpoint_public_access = true


vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets

# manage_aws_auth_configmap = true
# create_aws_auth_configmap = true
allowed_cidr_blocks = [local.vpc_cidr]

eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
instance_types = ["t3.medium"]
disk_size = 20
iam_role_attach_cni_policy = true
use_custom_launch_template = false

# AWS Managed Node Group
# Node Groups Defaults Values It will Work all Node Groups
managed_node_group_defaults = {
iam_role_additional_policies = {
policy_arn = aws_iam_policy.node_additional.arn
}
tags = {
"kubernetes.io/cluster/${module.eks.cluster_name}" = "shared"
"karpenter.sh/discovery/${module.eks.cluster_name}" = module.eks.cluster_name
}
block_device_mappings = {
xvda = {
device_name = "/dev/xvda"
ebs = {
volume_size = 50
volume_type = "gp3"
iops = 3000
throughput = 150
encrypted = true
}
}
}
}

eks_managed_node_groups = {
managed_node_group = {
critical = {
name = "critical"
instance_types = ["t3.medium"]
use_name_prefix = false
capacity_type = "ON_DEMAND"
min_size = 1
max_size = 2
desired_size = 1
name = "${module.eks.cluster_name}-critical"
capacity_type = "ON_DEMAND"
min_size = 1
max_size = 2
desired_size = 2
instance_types = ["t3.medium"]
}

application = {
name = "application"
instance_types = ["t3.medium"]
use_name_prefix = false
capacity_type = "SPOT"
min_size = 0
max_size = 1
desired_size = 0
name = "${module.eks.cluster_name}-application"
capacity_type = "SPOT"
min_size = 1
max_size = 2
desired_size = 1
force_update_version = true
instance_types = ["t3.medium"]
}
}
tags = local.tags

apply_config_map_aws_auth = true
map_additional_iam_users = [
{
userarn = "arn:aws:iam::123456789:user/[email protected]"
username = "[email protected]"
groups = ["system:masters"]
}]
addons = []
tags = local.tags
}

################################################################################
14 changes: 3 additions & 11 deletions examples/complete/providers.tf
@@ -8,23 +8,15 @@ provider "aws" {

provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster.certificate_authority[0].data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
args = ["eks", "get-token", "--cluster-name", data.aws_eks_cluster.eks_cluster.name]
command = "aws"
}
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.eks_cluster.token
}

provider "helm" {
kubernetes {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
args = ["eks", "get-token", "--cluster-name", data.aws_eks_cluster.eks_cluster.name]
command = "aws"
}
token = data.aws_eks_cluster_auth.eks_cluster.token
}
}
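
The exec-based token plugin is replaced here by a static token from the aws_eks_cluster_auth data source. Both providers therefore assume a data source roughly like the sketch below is declared elsewhere in the example; the exact module output used for the cluster name is an assumption.

data "aws_eks_cluster_auth" "eks_cluster" {
  name = module.eks.cluster_name
}

One trade-off worth noting: the token from aws_eks_cluster_auth is fetched once per plan or apply and expires after roughly 15 minutes, whereas the exec plugin refreshes credentials on demand, which can matter for long-running applies.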

2 changes: 2 additions & 0 deletions main.tf
@@ -196,6 +196,8 @@ module "certification_manager" {
  helm_config                         = var.certification_manager_helm_config != null ? var.certification_manager_helm_config : { values = [local_file.certification_manager_helm_config[count.index].content] }
  manage_via_gitops                   = var.manage_via_gitops
  addon_context                       = local.addon_context
  eks_cluster_name                    = data.aws_eks_cluster.eks_cluster.name
  account_id                          = data.aws_caller_identity.current.account_id
  certification_manager_extra_configs = var.certification_manager_extra_configs
}
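
The two new inputs are wired from data sources that are assumed to already exist in the root module, roughly as sketched below; if aws_caller_identity is not declared yet, it would need to be added alongside the existing aws_eks_cluster data source.

data "aws_caller_identity" "current" {}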
