Merge pull request #165 from junior/feature/use_existent_oke
Feature/use existent oke
junior authored Jun 4, 2020
2 parents 457ee44 + 20191b2 commit 252a14b
Showing 8 changed files with 111 additions and 51 deletions.
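Every change below follows one pattern: resources that belong to a new cluster gain a count guarded by a new create_new_oke_cluster variable, references to them switch to [0] indexing (a counted resource is addressed as a single-element list), and consumers fall back to a new existent_oke_cluster_id variable when no cluster is created. A minimal, self-contained sketch of the pattern — the demo_* names and values are illustrative, not the repo's:

variable "compartment_ocid" {}
variable "create_new_oke_cluster" { default = true }
variable "existent_oke_cluster_id" { default = "" }

# count = 0 skips creation entirely; count = 1 creates the resource,
# which is then addressed as demo_vcn[0] — hence the [0] throughout this diff.
resource "oci_core_virtual_network" "demo_vcn" {
  compartment_id = var.compartment_ocid
  cidr_block     = "10.0.0.0/16"

  count = var.create_new_oke_cluster ? 1 : 0
}

resource "oci_containerengine_cluster" "demo_cluster" {
  compartment_id     = var.compartment_ocid
  kubernetes_version = "v1.16.8"
  name               = "demo"
  vcn_id             = oci_core_virtual_network.demo_vcn[0].id

  count = var.create_new_oke_cluster ? 1 : 0
}

# Consumers resolve to the new cluster when it exists, else to the user-supplied id.
locals {
  oke_cluster_id = var.create_new_oke_cluster ? oci_containerengine_cluster.demo_cluster[0].id : var.existent_oke_cluster_id
}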
2 changes: 1 addition & 1 deletion deploy/complete/terraform/VERSION
@@ -1 +1 @@
- 2.0.1
+ 2.0.2
4 changes: 2 additions & 2 deletions deploy/complete/terraform/datasources.tf
@@ -19,7 +19,7 @@ data "oci_identity_availability_domains" "ADs" {

# Gets kubeconfig
data "oci_containerengine_cluster_kube_config" "oke_cluster_kube_config" {
- cluster_id = oci_containerengine_cluster.oke_mushop_cluster.id
+ cluster_id = var.create_new_oke_cluster ? oci_containerengine_cluster.oke_mushop_cluster[0].id : var.existent_oke_cluster_id
}


@@ -37,7 +37,7 @@ locals {
## Kubernetes Service: mushop-utils-ingress-nginx-controller
data "kubernetes_service" "mushop_ingress" {
metadata {
- name = "mushop-utils-ingress-nginx-controller" # mushop-utils included to be backwards compatible to the docs and setup chart install
+ name = "mushop-utils-ingress-nginx-controller" # mushop-utils name included to be backwards compatible to the docs and setup chart install
namespace = kubernetes_namespace.mushop_utilities_namespace.id
}
depends_on = [helm_release.ingress-nginx]
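With this change the kubeconfig data source resolves cluster_id through the same ternary, so kubeconfig retrieval works whether the cluster is created here or supplied by the user. The two variables backing it are presumably declared along these lines in variables.tf (the eighth changed file, not expanded on this page) — a guess at the shape, not the file's verbatim contents:

variable "create_new_oke_cluster" {
  description = "Creates a new OKE cluster, node pool and network resources"
  type        = bool
  default     = true
}

variable "existent_oke_cluster_id" {
  description = "Cluster Id of the existent OKE" # wording borrowed from schema.yaml below
  type        = string
  default     = ""
}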
46 changes: 29 additions & 17 deletions deploy/complete/terraform/oke-network.tf
@@ -7,79 +7,91 @@ resource "oci_core_virtual_network" "oke_mushop_vcn" {
compartment_id = var.compartment_ocid
display_name = "OKE MuShop VCN - ${random_string.deploy_id.result}"
dns_label = "oke${random_string.deploy_id.result}"
+
+ count = var.create_new_oke_cluster ? 1 : 0
}

resource "oci_core_subnet" "oke_mushop_subnet" {
cidr_block = lookup(var.network_cidrs, "SUBNET-REGIONAL-CIDR")
compartment_id = var.compartment_ocid
display_name = "oke-mushop-subnet-${random_string.deploy_id.result}"
dns_label = "okesubnet${random_string.deploy_id.result}"
- vcn_id = oci_core_virtual_network.oke_mushop_vcn.id
+ vcn_id = oci_core_virtual_network.oke_mushop_vcn[0].id
prohibit_public_ip_on_vnic = (var.cluster_visibility == "Private") ? true : false
- route_table_id = oci_core_route_table.oke_mushop_route_table.id
- dhcp_options_id = oci_core_virtual_network.oke_mushop_vcn.default_dhcp_options_id
- security_list_ids = [oci_core_security_list.oke_mushop_security_list.id]
+ route_table_id = oci_core_route_table.oke_mushop_route_table[0].id
+ dhcp_options_id = oci_core_virtual_network.oke_mushop_vcn[0].default_dhcp_options_id
+ security_list_ids = [oci_core_security_list.oke_mushop_security_list[0].id]
+
+ count = var.create_new_oke_cluster ? 1 : 0
}

resource "oci_core_subnet" "oke_mushop_lb_subnet" {
cidr_block = lookup(var.network_cidrs, "LB-SUBNET-REGIONAL-CIDR")
compartment_id = var.compartment_ocid
display_name = "oke-mushop-lb-subnet-${random_string.deploy_id.result}"
dns_label = "okelbsubnet${random_string.deploy_id.result}"
- vcn_id = oci_core_virtual_network.oke_mushop_vcn.id
+ vcn_id = oci_core_virtual_network.oke_mushop_vcn[0].id
prohibit_public_ip_on_vnic = false
- route_table_id = oci_core_route_table.oke_mushop_lb_route_table.id
- dhcp_options_id = oci_core_virtual_network.oke_mushop_vcn.default_dhcp_options_id
- security_list_ids = [oci_core_security_list.oke_mushop_lb_security_list.id]
+ route_table_id = oci_core_route_table.oke_mushop_lb_route_table[0].id
+ dhcp_options_id = oci_core_virtual_network.oke_mushop_vcn[0].default_dhcp_options_id
+ security_list_ids = [oci_core_security_list.oke_mushop_lb_security_list[0].id]
+
+ count = var.create_new_oke_cluster ? 1 : 0
}

resource "oci_core_route_table" "oke_mushop_route_table" {
compartment_id = var.compartment_ocid
- vcn_id = oci_core_virtual_network.oke_mushop_vcn.id
+ vcn_id = oci_core_virtual_network.oke_mushop_vcn[0].id
display_name = "oke-mushop-route-table-${random_string.deploy_id.result}"

route_rules {
destination = lookup(var.network_cidrs, "ALL-CIDR")
destination_type = "CIDR_BLOCK"
- network_entity_id = (var.cluster_visibility == "Private") ? oci_core_nat_gateway.oke_mushop_nat_gateway[0].id : oci_core_internet_gateway.oke_mushop_internet_gateway.id
+ network_entity_id = (var.cluster_visibility == "Private") ? oci_core_nat_gateway.oke_mushop_nat_gateway[0].id : oci_core_internet_gateway.oke_mushop_internet_gateway[0].id
}
+
+ count = var.create_new_oke_cluster ? 1 : 0
}

resource "oci_core_route_table" "oke_mushop_lb_route_table" {
compartment_id = var.compartment_ocid
- vcn_id = oci_core_virtual_network.oke_mushop_vcn.id
+ vcn_id = oci_core_virtual_network.oke_mushop_vcn[0].id
display_name = "oke-mushop-lb-route-table-${random_string.deploy_id.result}"

route_rules {
destination = lookup(var.network_cidrs, "ALL-CIDR")
destination_type = "CIDR_BLOCK"
- network_entity_id = oci_core_internet_gateway.oke_mushop_internet_gateway.id
+ network_entity_id = oci_core_internet_gateway.oke_mushop_internet_gateway[0].id
}
+
+ count = var.create_new_oke_cluster ? 1 : 0
}

resource "oci_core_nat_gateway" "oke_mushop_nat_gateway" {
block_traffic = "false"
compartment_id = var.compartment_ocid
display_name = "oke-mushop-nat-gateway-${random_string.deploy_id.result}"
- vcn_id = oci_core_virtual_network.oke_mushop_vcn.id
+ vcn_id = oci_core_virtual_network.oke_mushop_vcn[0].id

- count = (var.cluster_visibility == "Private") ? 1 : 0
+ count = var.create_new_oke_cluster ? ((var.cluster_visibility == "Private") ? 1 : 0) : 0
}

resource "oci_core_internet_gateway" "oke_mushop_internet_gateway" {
compartment_id = var.compartment_ocid
display_name = "oke-mushop-internet-gateway-${random_string.deploy_id.result}"
enabled = true
- vcn_id = oci_core_virtual_network.oke_mushop_vcn.id
+ vcn_id = oci_core_virtual_network.oke_mushop_vcn[0].id
+
+ count = var.create_new_oke_cluster ? 1 : 0
}

resource "oci_core_service_gateway" "oke_mushop_service_gateway" {
compartment_id = var.compartment_ocid
display_name = "oke-mushop-service-gateway-${random_string.deploy_id.result}"
- vcn_id = oci_core_virtual_network.oke_mushop_vcn.id
+ vcn_id = oci_core_virtual_network.oke_mushop_vcn[0].id
services {
service_id = lookup(data.oci_core_services.all_services.services[0], "id")
}

- count = var.mushop_mock_mode_all ? 0 : 1
+ count = var.create_new_oke_cluster ? (var.mushop_mock_mode_all ? 0 : 1) : 0
}
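Note how the NAT and service gateways compose two conditions by nesting ternaries inside count. Terraform 0.12 also accepts a plain logical AND here; an equivalent formulation, continuing the illustrative demo_vcn sketch above (not what the repo uses):

resource "oci_core_nat_gateway" "example_nat_gateway" {
  block_traffic  = "false"
  compartment_id = var.compartment_ocid
  display_name   = "example-nat-gateway"
  vcn_id         = oci_core_virtual_network.demo_vcn[0].id

  # Same effect as: var.create_new_oke_cluster ? ((var.cluster_visibility == "Private") ? 1 : 0) : 0
  count = var.create_new_oke_cluster && var.cluster_visibility == "Private" ? 1 : 0
}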
7 changes: 5 additions & 2 deletions deploy/complete/terraform/oke-securitylists.tf
@@ -5,7 +5,7 @@
resource oci_core_security_list oke_mushop_security_list {
compartment_id = var.compartment_ocid
display_name = "oke-mushop-wkr-seclist-${random_string.deploy_id.result}"
- vcn_id = oci_core_virtual_network.oke_mushop_vcn.id
+ vcn_id = oci_core_virtual_network.oke_mushop_vcn[0].id

egress_security_rules {
destination = lookup(var.network_cidrs, "SUBNET-REGIONAL-CIDR")
@@ -46,12 +46,13 @@ resource oci_core_security_list oke_mushop_security_list {
}
}

+ count = var.create_new_oke_cluster ? 1 : 0
}

resource oci_core_security_list oke_mushop_lb_security_list {
compartment_id = var.compartment_ocid
display_name = "oke-mushop-wkr-lb-seclist-${random_string.deploy_id.result}"
- vcn_id = oci_core_virtual_network.oke_mushop_vcn.id
+ vcn_id = oci_core_virtual_network.oke_mushop_vcn[0].id

egress_security_rules {
destination = lookup(var.network_cidrs, "ALL-CIDR")
@@ -66,4 +67,6 @@ resource oci_core_security_list oke_mushop_lb_security_list {
protocol = "6"
stateless = true
}
+
+ count = var.create_new_oke_cluster ? 1 : 0
}
12 changes: 8 additions & 4 deletions deploy/complete/terraform/oke.tf
@@ -6,10 +6,10 @@ resource "oci_containerengine_cluster" "oke_mushop_cluster" {
compartment_id = var.compartment_ocid
kubernetes_version = var.k8s_version
name = "${var.cluster_name}-${random_string.deploy_id.result}"
- vcn_id = oci_core_virtual_network.oke_mushop_vcn.id
+ vcn_id = oci_core_virtual_network.oke_mushop_vcn[0].id

options {
- service_lb_subnet_ids = [oci_core_subnet.oke_mushop_lb_subnet.id]
+ service_lb_subnet_ids = [oci_core_subnet.oke_mushop_lb_subnet[0].id]
add_ons {
is_kubernetes_dashboard_enabled = var.cluster_options_add_ons_is_kubernetes_dashboard_enabled
is_tiller_enabled = false # Default is false, left here for reference
@@ -18,10 +18,12 @@ resource "oci_containerengine_cluster" "oke_mushop_cluster" {
is_pod_security_policy_enabled = var.cluster_options_admission_controller_options_is_pod_security_policy_enabled
}
}
+
+ count = var.create_new_oke_cluster ? 1 : 0
}

resource "oci_containerengine_node_pool" "oke_mushop_node_pool" {
- cluster_id = oci_containerengine_cluster.oke_mushop_cluster.id
+ cluster_id = oci_containerengine_cluster.oke_mushop_cluster[0].id
compartment_id = var.compartment_ocid
kubernetes_version = var.k8s_version
name = var.node_pool_name
@@ -34,7 +36,7 @@ resource "oci_containerengine_node_pool" "oke_mushop_node_pool" {

content {
availability_domain = placement_configs.value.name
- subnet_id = oci_core_subnet.oke_mushop_subnet.id
+ subnet_id = oci_core_subnet.oke_mushop_subnet[0].id
}
}
size = var.num_pool_workers
@@ -50,6 +52,8 @@ resource "oci_containerengine_node_pool" "oke_mushop_node_pool" {
key = "name"
value = var.node_pool_name
}
+
+ count = var.create_new_oke_cluster ? 1 : 0
}

# Local kubeconfig for when using Terraform locally. Not used by Oracle Resource Manager
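The collapsed tail of oke.tf (per the comment above) writes the retrieved kubeconfig to disk for local Terraform runs. With the hashicorp/local provider that is roughly the following — a sketch with an assumed filename, not the repo's exact code:

resource "local_file" "oke_kubeconfig" {
  content  = data.oci_containerengine_cluster_kube_config.oke_cluster_kube_config.content
  filename = "${path.module}/generated/kubeconfig"
}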
72 changes: 51 additions & 21 deletions deploy/complete/terraform/schema.yaml
@@ -20,18 +20,22 @@ variableGroups:
- tenancy_ocid
- region
visible: false
- - title: "General Configuration"
+ - title: "OKE General Configuration"
variables:
+ - create_new_oke_cluster
+ - existent_oke_cluster_id
- cluster_name
- k8s_version
- cluster_visibility
visible: true
- - title: "Worker Nodes"
+ - title: "OKE Worker Nodes"
variables:
- node_pool_shape
- num_pool_workers
- node_pool_name
- visible: true
+ visible: #($create_new_oke_cluster = true)
+ and:
+ - create_new_oke_cluster
- title: "Add Ons"
variables:
- cluster_options_add_ons_is_kubernetes_dashboard_enabled
@@ -99,11 +103,28 @@ variables:
description: "The compartment in which to create compute instance(s)"
required: true

+ create_new_oke_cluster:
+ type: boolean
+ title: "Create new OKE Cluster"
+ description: "Creates a new OKE cluster, node pool and network resources"
+
+ existent_oke_cluster_id:
+ type: string
+ title: "OKE Cluster id"
+ description: "Cluster Id of the existent OKE"
+ required: true
+ visible: #($create_new_oke_cluster = false)
+ not:
+ - create_new_oke_cluster
+
cluster_name:
type: string
title: "Cluster Name Prefix"
description: "OKE cluster name prefix"
required: true
+ visible: #($create_new_oke_cluster = true)
+ and:
+ - create_new_oke_cluster

k8s_version:
type: enum
@@ -113,6 +134,9 @@
title: "Kubernetes Version"
description: "Kubernetes version installed on your master and worker nodes"
required: true
+ visible: #($create_new_oke_cluster = true)
+ and:
+ - create_new_oke_cluster

cluster_visibility:
type: enum
@@ -122,6 +146,9 @@
title: "Choose visibility type"
description: "The Kubernetes worker nodes that are created will be hosted in public or private subnet(s)"
required: true
+ visible: #($create_new_oke_cluster = true)
+ and:
+ - create_new_oke_cluster

node_pool_shape:
type: oci:core:instanceshape:name
@@ -140,18 +167,21 @@
required: true

node_pool_name:
- visible: #($show_advanced == ""Yes"")
- eq:
- - show_advanced
- - "Yes"
type: string
title: "Node Pool Name"
description: "Name of the node pool"
required: true
+ visible: #($show_advanced == ""Yes"")
+ eq:
+ - show_advanced
+ - "Yes"

cluster_options_add_ons_is_kubernetes_dashboard_enabled:
type: boolean
title: "Kubernetes Dashboard Enabled"
+ visible: #($create_new_oke_cluster = true)
+ and:
+ - create_new_oke_cluster

# Advanced Options
show_advanced:
@@ -164,48 +194,48 @@
required: true

generate_public_ssh_key:
+ type: boolean
+ title: "Auto generate public ssh key?"
+ required: true
visible: #($show_advanced == ""Yes"")
eq:
- show_advanced
- "Yes"
- type: boolean
- title: "Auto generate public ssh key?"
- required: true

public_ssh_key:
+ type: string
+ title: "Input SSH public key"
+ description: "In order to access your private nodes with a public SSH key you will need to set up a bastion host (a.k.a. jump box). If using public nodes, bastion is not needed. Left blank to not import keys."
+ required: false
visible: #($show_advanced == ""Yes"") + ($generate_public_ssh_key == "false")
and:
- eq:
- show_advanced
- "Yes"
- not:
- generate_public_ssh_key
- type: string
- title: "Input SSH public key"
- description: "In order to access your private nodes with a public SSH key you will need to set up a bastion host (a.k.a. jump box). If using public nodes, bastion is not needed. Left blank to not import keys."
- required: false

image_operating_system:
- visible: #($show_advanced == ""Yes"")
- eq:
- - show_advanced
- - "Yes"
type: enum
title: "Image OS"
description: "The OS/image installed on all nodes in the node pool."
enum:
- "Oracle Linux"
required: true
-
- image_operating_system_version:
visible: #($show_advanced == ""Yes"")
eq:
- show_advanced
- "Yes"
+
+ image_operating_system_version:
type: string
required: true
title: "Image OS Version"
- description: "The OS/image version installed on all nodes in the node pool."
+ description: "The OS/image version installed on all nodes in the node pool."
+ visible: #($show_advanced == ""Yes"")
+ eq:
+ - show_advanced
+ - "Yes"

mushop_mock_mode_all:
visible: yes
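The visible keys above are Oracle Resource Manager schema expressions evaluated over other schema variables, with and/not/eq as the building blocks. Combining the two guards used in this file (advanced options on AND a new cluster requested) would look like this — an illustrative snippet with a hypothetical variable name, not part of the commit:

example_advanced_oke_option:
  type: boolean
  title: "Example advanced OKE option"
  visible:
    and:
      - eq:
          - show_advanced
          - "Yes"
      - create_new_oke_cluster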
7 changes: 6 additions & 1 deletion deploy/complete/terraform/terraform.tfvars.example
@@ -12,12 +12,17 @@ user_ocid = "" # e.g.: "ocid1.user..." or leave blank if using CloudShell
# region
region = "us-ashburn-1"

- # cluster_visibility
+ # OKE Cluster
+ ## cluster_visibility
cluster_visibility = "Private"
+ ## create_new_oke_cluster
+ create_new_oke_cluster = true
+ existent_oke_cluster_id = "" # e.g.: ocid1.cluster.oc1.i...
+
# public_ssh_key
generate_public_ssh_key = true # if true, auto generate public and private keys and assign to the node pool.
public_ssh_key = "" # if generate_public_ssh_key=true, public_ssh_key is ignored. if generate_public_ssh_key=false, assign public_ssh_key, that can be nothing if ""

# MuShop
## Enable Mock Mode
mushop_mock_mode_all = false # Set to true if do not want to provision ATP, ObjectStorage and mock all services
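To point the deployment at an existing cluster instead, the same file flips the two values — the OCID below is illustrative:

## create_new_oke_cluster
create_new_oke_cluster  = false
existent_oke_cluster_id = "ocid1.cluster.oc1..." # OCID of the existing OKE cluster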