Update install script
Yevhen Ivantsov committed Oct 30, 2023
1 parent c32a204 commit 208d149
Showing 3 changed files with 16 additions and 11 deletions.
config.tfvars: 10 changes (5 additions, 5 deletions)
@@ -86,7 +86,7 @@ max_cluster_capacity = 5
#additional_namespaces = ["extra_namespace"]

# Path to a JSON file with EBS and RDS snapshot IDs
-#snapshots_json_file_path = "test/dcapt-snapshots.json"
+# snapshots_json_file_path = "test/dcapt-snapshots.json"

################################################################################
# Osquery settings. Atlassian only!
@@ -219,7 +219,7 @@ jira_db_iops = 1000
jira_db_name = "jira"

# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large
-# jira_dataset_size = "small"
+# jira_dataset_size = "large"

# Database restore configuration
# If you want to restore the database from a snapshot, uncomment the following line and provide the snapshot identifier.
@@ -317,7 +317,7 @@ confluence_db_iops = 1000
confluence_db_name = "confluence"

# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large
-# confluence_dataset_size = "small"
+# confluence_dataset_size = "large"

# Database restore configuration
# If you want to restore the database from a snapshot, uncomment the following lines and provide the snapshot identifier.
@@ -430,7 +430,7 @@ bitbucket_db_name = "bitbucket"
#bitbucket_elasticsearch_replicas = "<NUMBER_OF_NODES>"

# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large
-# bitbucket_dataset_size = "small"
+# bitbucket_dataset_size = "large"

# Dataset Restore

@@ -603,7 +603,7 @@ crowd_db_name = "crowd"
#crowd_termination_grace_period = 0

# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large
-# crowd_dataset_size = "small"
+# crowd_dataset_size = "large"

# Dataset Restore

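Note that the tfvars changes above are comment-only: the commented-out dataset-size examples now read "large", matching the default that the updated install.sh (next file) falls back to when a <product>_dataset_size setting is left unset. A minimal sketch of that fallback in isolation, where the empty string stands in for what get_variable presumably returns for a commented-out setting (illustrative only):

DATASET_SIZE=""                          # a commented-out jira_dataset_size yields nothing
if [ -z "$DATASET_SIZE" ]; then
  DATASET_SIZE="large"                   # matches the "Defaults to large" comments
fi
echo "Dataset size is ${DATASET_SIZE}"   # -> Dataset size is large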
install.sh: 9 changes (7 additions, 2 deletions)
@@ -114,9 +114,14 @@ pre_flight_checks() {
PRODUCT_VERSION=$(get_variable ${PRODUCT_VERSION_VAR} "${CONFIG_ABS_PATH}")
MAJOR_MINOR_VERSION=$(echo "$PRODUCT_VERSION" | cut -d '.' -f1-2)
EBS_SNAPSHOT_ID=$(get_variable ${SHARED_HOME_SNAPSHOT_VAR} "${CONFIG_ABS_PATH}")
+DATASET_SIZE=$(get_variable ${PRODUCT}_dataset_size "${CONFIG_ABS_PATH}")
+if [ -z "$DATASET_SIZE" ]; then
+  DATASET_SIZE="large"
+fi
+log "Dataset size is ${DATASET_SIZE}"
SNAPSHOTS_JSON_FILE_PATH=$(get_variable 'snapshots_json_file_path' "${CONFIG_ABS_PATH}")
if [ "${SNAPSHOTS_JSON_FILE_PATH}" ]; then
-EBS_SNAPSHOT_ID=$(cat ${SNAPSHOTS_JSON_FILE_PATH} | jq ".${PRODUCT}.versions[] | select(.version == \"${PRODUCT_VERSION}\") | .data[] | select(.size == \"large\" and .type == \"ebs\") | .snapshots[] | .[\"${REGION}\"]" | sed 's/"//g')
+EBS_SNAPSHOT_ID=$(cat ${SNAPSHOTS_JSON_FILE_PATH} | jq ".${PRODUCT}.versions[] | select(.version == \"${PRODUCT_VERSION}\") | .data[] | select(.size == \"${DATASET_SIZE}\" and .type == \"ebs\") | .snapshots[] | .[\"${REGION}\"]" | sed 's/"//g')
fi
if [ ! -z ${EBS_SNAPSHOT_ID} ]; then
log "Checking EBS snapshot ${EBS_SNAPSHOT_ID} compatibility with ${PRODUCT} version ${PRODUCT_VERSION}"
@@ -152,7 +157,7 @@ pre_flight_checks() {
fi
RDS_SNAPSHOT_ID=$(get_variable ${RDS_SNAPSHOT_VAR} "${CONFIG_ABS_PATH}")
if [ "${SNAPSHOTS_JSON_FILE_PATH}" ]; then
-RDS_SNAPSHOT_ID=$(cat ${SNAPSHOTS_JSON_FILE_PATH} | jq ".${PRODUCT}.versions[] | select(.version == \"${PRODUCT_VERSION}\") | .data[] | select(.size == \"large\" and .type == \"rds\") | .snapshots[] | .[\"${REGION}\"]" | sed 's/"//g')
+RDS_SNAPSHOT_ID=$(cat ${SNAPSHOTS_JSON_FILE_PATH} | jq ".${PRODUCT}.versions[] | select(.version == \"${PRODUCT_VERSION}\") | .data[] | select(.size == \"${DATASET_SIZE}\" and .type == \"rds\") | .snapshots[] | .[\"${REGION}\"]" | sed 's/"//g')
fi
if [ ! -z ${RDS_SNAPSHOT_ID} ]; then
log "Checking RDS snapshot ${RDS_SNAPSHOT_ID} compatibility with ${PRODUCT} version ${PRODUCT_VERSION}"
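The rewritten jq lookups assume the snapshots JSON is keyed by product, version, dataset size, snapshot type, and region. A runnable sketch against a made-up file; the structure below is inferred from the query itself, and the version and snapshot ID are fabricated (the real test/dcapt-snapshots.json may carry additional fields):

cat > /tmp/dcapt-snapshots.json <<'EOF'
{
  "jira": {
    "versions": [
      {
        "version": "9.4.9",
        "data": [
          {
            "size": "large",
            "type": "ebs",
            "snapshots": [
              { "us-east-2": "snap-0123456789abcdef0" }
            ]
          }
        ]
      }
    ]
  }
}
EOF
PRODUCT="jira"; PRODUCT_VERSION="9.4.9"; DATASET_SIZE="large"; REGION="us-east-2"
# Same query as install.sh; jq -r emits the raw string, which is what the
# script's trailing sed 's/"//g' achieves:
jq -r ".${PRODUCT}.versions[] | select(.version == \"${PRODUCT_VERSION}\") | .data[] | select(.size == \"${DATASET_SIZE}\" and .type == \"ebs\") | .snapshots[] | .[\"${REGION}\"]" /tmp/dcapt-snapshots.json
# -> snap-0123456789abcdef0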
variables.tf: 8 changes (4 additions, 4 deletions)
@@ -409,7 +409,7 @@ variable "jira_dataset_size" {
type = string
default = "large"
validation {
-condition = var.jira_dataset_size == null || can(regex("large|small", var.jira_dataset_size))
+condition = var.jira_dataset_size == null || contains(["large", "small"], var.jira_dataset_size)
error_message = "Invalid dataset size. Expected values are: small, large"
}
}
@@ -653,7 +653,7 @@ variable "confluence_dataset_size" {
type = string
default = "large"
validation {
-condition = var.confluence_dataset_size == null || can(regex("large|small", var.confluence_dataset_size))
+condition = var.confluence_dataset_size == null || contains(["large", "small"], var.confluence_dataset_size)
error_message = "Invalid dataset size. Expected values are: small, large"
}
}
@@ -909,7 +909,7 @@ variable "bitbucket_dataset_size" {
type = string
default = "large"
validation {
-condition = var.bitbucket_dataset_size == null || can(regex("large|small", var.bitbucket_dataset_size))
+condition = var.bitbucket_dataset_size == null || contains(["large", "small"], var.bitbucket_dataset_size)
error_message = "Invalid dataset size. Expected values are: small, large"
}
}
@@ -1357,7 +1357,7 @@ variable "crowd_dataset_size" {
type = string
default = "large"
validation {
-condition = var.crowd_dataset_size == null || can(regex("large|small", var.crowd_dataset_size))
+condition = var.crowd_dataset_size == null || contains(["large", "small"], var.crowd_dataset_size)
error_message = "Invalid dataset size. Expected values are: small, large"
}
}
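The validation change in all four variables is a real tightening, not a style fix: Terraform's regex() is unanchored, so can(regex("large|small", v)) accepts any value that merely contains one of the words, while contains() demands exact membership. A quick check with terraform console, which evaluates built-in expressions piped to stdin (assumes a terraform binary on PATH; no configuration is needed):

echo 'can(regex("large|small", "xlarge"))'    | terraform console   # true  - substring slips through
echo 'contains(["large", "small"], "xlarge")' | terraform console   # false - exact match required
echo 'contains(["large", "small"], "small")'  | terraform console   # true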
