diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml deleted file mode 100644 index b8a5c64d..00000000 --- a/.github/workflows/cleanup-imagetags.yaml +++ /dev/null @@ -1,95 +0,0 @@ -name: Scheduled cleanup unused images -on: - workflow_dispatch: - schedule: - - cron: '0 0 * * 0' -# At 00:00 on Sunday. - -jobs: - - cleanup-images: - runs-on: ubuntu-latest - steps: - - - name: Log in to Openshift - uses: redhat-actions/oc-login@v1.3 - with: - openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} - openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} - insecure_skip_tls_verify: true - namespace: ${{ env.TOOLS_NAMESPACE }} - - - name: cleanup-images - continue-on-error: true - run: | - - #!/bin/bash - - # This script will delete all image tags for both frontend and backend except the one being referenced - - # The sample of search_string is cthub-backend:0.2.0-20240403221450 - # The sample of oc_output could include - # cthub-backend:0.2.0-20240403210040 - # cthub-backend:0.2.0-20240403211844 - # cthub-backend:0.2.0-20240403221450 - # The script will remove the first two image tags - - delete_resources() { - local search_string="$1" - local oc_output="$2" - local namepace="$3" - - # Check if the oc_output is empty - if [ -z "$oc_output" ]; then - echo "Error: No output provided." - return 1 - fi - - # Loop through each line in the oc output - while IFS= read -r line; do - # Check if the line contains the search string - if [[ "$line" != *"$search_string"* ]]; then - # Extract the name of the resource from the line - resource_name=$(echo "$line" | awk '{print $1}') - # Delete the resource - oc -n "$namepace" delete imagetag/"$resource_name" - fi - done <<< "$oc_output" - } - - # Define the search string - search_string=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev describe deploymentconfig/cthub-dev-backend | grep Image | awk -F '/' '{print $NF}') - # Run the oc command and store the output in a variable - oc_output=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get imagetags | grep cthub-backend | awk '{print $1}') - namespace="${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev" - echo "Will delete all cthub-bakcend image tags in ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev except $search_string" - delete_resources "$search_string" "$oc_output" "$namespace" - - # Define the search string - search_string=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev describe deployment/cthub-dev-frontend | grep Image | awk -F '/' '{print $NF}') - # Run the oc command and store the output in a variable - oc_output=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get imagetags | grep cthub-frontend | awk '{print $1}') - echo "Will delete all cthub-frontend image tags in ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev except $search_string" - namespace="${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev" - delete_resources "$search_string" "$oc_output" "$namespace" - - echo "will delete images in tools env" - frontendimages=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}') - if [ ! -z "$frontendimages" ]; then - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag - fi - - backendimages=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}') - if [ ! 
-z "$backendimages" ]; then - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag - fi - - echo "Cleaning up Completed pods on Dev except CrunchyDB pods" - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get pods | grep Completed | grep -v crunchy | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev delete pod || true - - echo "Cleaning up Complete and Failed builds on Tools" - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get builds | grep Complete | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete build || true - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get builds | grep Failed | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete build || true - - echo "Cleaning up buildconfigs on Tools" - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get buildconfig | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete buildconfig || true diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml index a87a2fc4..9a27a20b 100644 --- a/.github/workflows/dev-ci.yaml +++ b/.github/workflows/dev-ci.yaml @@ -1,30 +1,47 @@ ## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly -name: CTHUB 0.4.0 Dev CI +name: CTHUB 0.5.0 Dev CI on: push: - branches: [ release-0.4.0 ] + branches: [release-0.5.0] # paths: # - frontend/** # - backend/** workflow_dispatch: env: - VERSION: 0.4.0 - GIT_URL: https://github.com/bcgov/cthub.git + GIT_URL: https://github.com/bcgov/cthub.git TOOLS_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev +permissions: + contents: read + issues: write concurrency: group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true + cancel-in-progress: true jobs: + set-version: + name: Parse version from branch + runs-on: ubuntu-latest + outputs: + version: ${{ steps.set-version.outputs.VERSION }} + steps: + - id: set-version + run: | + version="${GITHUB_REF_NAME##*-}" + if [[ -z "$version" || "$version" == "$GITHUB_REF_NAME" ]]; then + echo "Invalid branch name '$GITHUB_REF_NAME'; expected pattern '*-'." 
>&2 + exit 1 + fi + echo "VERSION=$version" >> "$GITHUB_OUTPUT" install-oc: runs-on: ubuntu-latest + needs: set-version outputs: cache-hit: ${{ steps.cache.outputs.cache-hit }} steps: @@ -35,7 +52,7 @@ jobs: id: cache uses: actions/cache@v4.2.0 with: - path: /usr/local/bin/oc # Path where the `oc` binary will be installed + path: /usr/local/bin/oc # Path where the `oc` binary will be installed key: oc-cli-${{ runner.os }} - name: Install OpenShift CLI (if not cached) @@ -52,27 +69,26 @@ jobs: set-pre-release: name: Calculate pre-release number runs-on: ubuntu-latest - needs: [install-oc] - + needs: [set-version, install-oc] + outputs: output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }} - + steps: - id: set-pre-release run: echo "PRE_RELEASE=$(date +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT build: - name: Build CTHUB runs-on: ubuntu-latest - needs: set-pre-release + needs: [set-version, set-pre-release] timeout-minutes: 60 env: + VERSION: ${{ needs.set-version.outputs.version }} PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} steps: - - name: Check out repository uses: actions/checkout@v4.1.1 @@ -93,9 +109,8 @@ jobs: - name: Build CTHUB Backend run: | cd openshift/templates/backend - oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} - sleep 5s - oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + oc process -f ./backend-bc-docker.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + oc -n ${{ env.TOOLS_NAMESPACE }} start-build cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }} --wait=true oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - name: Build CTHUB Frontend @@ -105,7 +120,7 @@ jobs: sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - + - name: Build CTHUB Task Queue run: | cd openshift/templates/task-queue @@ -123,42 +138,31 @@ jobs: oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }} deploy: - name: Deploy CTHUB on Dev runs-on: ubuntu-latest timeout-minutes: 60 - needs: [set-pre-release, build] + needs: [set-version, set-pre-release, build] env: + VERSION: ${{ needs.set-version.outputs.version }} PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} steps: - - name: Checkout Manifest repository uses: actions/checkout@v4.1.1 with: repository: bcgov-c/tenant-gitops-30b186 ref: main ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} - + - name: Update tags uses: mikefarah/yq@v4.40.5 with: - cmd: | - yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml - yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' 
cthub/values-dev.yaml - yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml - yq -i '.vinpower.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml - - # - name: Update backend tag - # uses: mikefarah/yq@v4.40.5 - # with: - # cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml - - # - name: Update task-queue tag - # uses: mikefarah/yq@v4.40.5 - # with: - # cmd: yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + cmd: | + yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + yq -i '.vinpower.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml - name: GitHub Commit & Push run: | @@ -167,4 +171,22 @@ jobs: git add cthub/values-dev.yaml git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on Dev" git push - \ No newline at end of file + + push-to-artifactory: + name: Push images to Artifactory (${{ matrix.image_stream }}) + needs: [set-version, set-pre-release, deploy] + strategy: + matrix: + image_stream: + - cthub-backend + - cthub-frontend + - cthub-task-queue + - cthub-vinpower + uses: ./.github/workflows/push-images-to-artifactory.yaml + with: + env: dev + app_name: cthub + image_stream: ${{ matrix.image_stream }} + image_tag: ${{ needs.set-version.outputs.version }}-${{ needs.set-pre-release.outputs.output1 }} + secrets: inherit + diff --git a/.github/workflows/prod-ci.yaml b/.github/workflows/prod-ci.yaml index 82efa58e..f3a96c5c 100644 --- a/.github/workflows/prod-ci.yaml +++ b/.github/workflows/prod-ci.yaml @@ -1,23 +1,37 @@ ## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly -name: CTHUB 0.4.0 Prod CI +name: CTHUB 0.5.0 Prod CI on: workflow_dispatch: env: - VERSION: 0.4.0 - GIT_URL: https://github.com/bcgov/cthub.git + VERSION: 0.5.0 + GIT_URL: https://github.com/bcgov/cthub.git TEST_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test PROD_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-prod +permissions: + contents: read + issues: write concurrency: group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true + cancel-in-progress: true jobs: + set-version: + name: Parse version from branch + runs-on: ubuntu-latest + outputs: + version: ${{ steps.set-version.outputs.VERSION }} + steps: + - id: set-version + run: | + echo "VERSION=${{ env.VERSION }}" >> "$GITHUB_OUTPUT" + install-oc: runs-on: ubuntu-latest + needs: [set-version] outputs: cache-hit: ${{ steps.cache.outputs.cache-hit }} steps: @@ -28,7 +42,7 @@ jobs: id: cache uses: actions/cache@v4.2.0 with: - path: /usr/local/bin/oc # Path where the `oc` binary will be installed + path: /usr/local/bin/oc # Path where the `oc` binary will be installed key: oc-cli-${{ runner.os }} - name: Install OpenShift CLI (if not cached) @@ -46,7 +60,7 @@ jobs: name: Find Test deployment pre-release number runs-on: ubuntu-latest needs: [install-oc] - + outputs: output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }} @@ -70,7 +84,6 @@ jobs: echo "PRE_RELEASE=$(oc -n ${{ env.TEST_NAMESPACE }} describe deployment/cthub-test-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT deploy: - name: Deploy CTHUB on 
Prod runs-on: ubuntu-latest timeout-minutes: 60 @@ -86,7 +99,7 @@ jobs: secret: ${{ github.TOKEN }} approvers: emi-hi,kuanfandevops,tim738745,JulianForeman minimum-approvals: 2 - issue-title: "CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} PRODUCTION Deployment" + issue-title: "CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} PRODUCTION Deployment" - name: Restore oc command from Cache uses: actions/cache@v4.2.0 @@ -100,7 +113,7 @@ jobs: openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} insecure_skip_tls_verify: true - namespace: ${{ env.TEST_NAMESPACE }} + namespace: ${{ env.TEST_NAMESPACE }} - name: Tag CTHUB images to Test run: | @@ -108,22 +121,22 @@ jobs: oc tag ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} oc tag ${{ env.TEST_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} oc tag ${{ env.TEST_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - + - name: Checkout Manifest repository uses: actions/checkout@v4.1.1 with: repository: bcgov-c/tenant-gitops-30b186 ref: main ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} - + - name: Update frontend tag uses: mikefarah/yq@v4.40.5 with: - cmd: | - yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml - yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml - yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml - yq -i '.vinpower.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml + cmd: | + yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml + yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml + yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml + yq -i '.vinpower.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml - name: GitHub Commit & Push run: | @@ -132,4 +145,21 @@ jobs: git add cthub/values-prod.yaml git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on PRODUCTION" git push - \ No newline at end of file + + push-to-artifactory: + name: Push images to Artifactory (${{ matrix.image_stream }}) + needs: [set-version, set-pre-release, deploy] + strategy: + matrix: + image_stream: + - cthub-backend + - cthub-frontend + - cthub-task-queue + - cthub-vinpower + uses: ./.github/workflows/push-images-to-artifactory.yaml + with: + env: prod + app_name: cthub + image_stream: ${{ matrix.image_stream }} + image_tag: ${{ needs.set-version.outputs.version }}-${{ needs.set-pre-release.outputs.output1 }} + secrets: inherit diff --git a/.github/workflows/push-dependencies-to-artifactory.yaml b/.github/workflows/push-dependencies-to-artifactory.yaml new file mode 100644 index 00000000..1de9e4fb --- /dev/null +++ b/.github/workflows/push-dependencies-to-artifactory.yaml @@ -0,0 +1,50 @@ +name: Push Dependent Images to Artifactory + +on: + workflow_dispatch: + +env: + ARTIFACTORY_REGISTRY: ${{ secrets.ARTIFACTORY_REGISTRY }} + ARTIFACTORY_REPO: ${{ secrets.ARTIFACTORY_REPO }} + ARTIFACTORY_API_KEY: ${{ 
secrets.ARTIFACTORY_API_KEY }} + ARTIFACTORY_USERNAME: ${{ secrets.ARTIFACTORY_USERNAME }} + METABASE_IMAGE_TAG: v0.50.6 + +jobs: + + push-dependent-images-to-artifactory: + name: Push dependent images to Artifactory + runs-on: ubuntu-latest + steps: + - name: Install skopeo + run: | + sudo apt-get update + sudo apt-get install -y skopeo + + - name: Login to Artifactory + run: | + set -euo pipefail + AUTH_CURL_FLAGS=(-H "X-JFrog-Art-Api: ${ARTIFACTORY_API_KEY}") + AUTH_PASSWORD="${ARTIFACTORY_API_KEY}" + + curl -fsS "${AUTH_CURL_FLAGS[@]}" \ + "https://${ARTIFACTORY_REGISTRY}/artifactory/api/system/ping" + REPO_INFO="$(curl -fsS "${AUTH_CURL_FLAGS[@]}" \ + "https://${ARTIFACTORY_REGISTRY}/artifactory/api/repositories/${ARTIFACTORY_REPO}")" + echo "${REPO_INFO}" | tr -d '\n' | grep -Eq '"packageType"[[:space:]]*:[[:space:]]*"docker"' || \ + { echo "Repo ${ARTIFACTORY_REPO} is not a Docker repo."; exit 1; } + echo "${REPO_INFO}" | tr -d '\n' | grep -Eq '"rclass"[[:space:]]*:[[:space:]]*"(local|virtual|federated)"' || \ + { echo "Repo ${ARTIFACTORY_REPO} must be local or virtual or federated."; exit 1; } + skopeo login --authfile /tmp/artifactory-auth.json \ + --username "${ARTIFACTORY_USERNAME}" \ + --password "${AUTH_PASSWORD}" \ + "${ARTIFACTORY_REGISTRY}" + + - name: Copy Metabase image from Docker Hub + run: | + set -euo pipefail + echo "Pushing Metabase image" + skopeo copy --src-tls-verify=true --dest-tls-verify=true \ + --dest-authfile /tmp/artifactory-auth.json \ + "docker://metabase/metabase:${METABASE_IMAGE_TAG}" \ + "docker://${ARTIFACTORY_REGISTRY}/${ARTIFACTORY_REPO}/cthub/prod/metabase/metabase:${METABASE_IMAGE_TAG}" diff --git a/.github/workflows/push-images-to-artifactory.yaml b/.github/workflows/push-images-to-artifactory.yaml new file mode 100644 index 00000000..c7648f00 --- /dev/null +++ b/.github/workflows/push-images-to-artifactory.yaml @@ -0,0 +1,166 @@ +name: Push images to Artifactory + +on: + workflow_call: + inputs: + env: + description: Deployment environment (dev, test, prod). + required: true + type: string + app_name: + description: Application name used in Artifactory paths. + required: true + type: string + image_stream: + description: Image stream name to copy (e.g. cthub-backend). + required: true + type: string + image_tag: + description: Image tag to copy to Artifactory. 
+ required: true + type: string + secrets: + ARTIFACTORY_REGISTRY: + required: true + ARTIFACTORY_REPO: + required: true + ARTIFACTORY_API_KEY: + required: true + ARTIFACTORY_USERNAME: + required: true + OPENSHIFT_SERVER: + required: true + OPENSHIFT_TOKEN: + required: true + OPENSHIFT_NAMESPACE_PLATE: + required: true + +permissions: + contents: read + issues: write + +jobs: + push-images-to-artifactory: + name: Push images to Artifactory + runs-on: ubuntu-latest + timeout-minutes: 60 + env: + ARTIFACTORY_REGISTRY: ${{ secrets.ARTIFACTORY_REGISTRY }} + ARTIFACTORY_REPO: ${{ secrets.ARTIFACTORY_REPO }} + ARTIFACTORY_API_KEY: ${{ secrets.ARTIFACTORY_API_KEY }} + ARTIFACTORY_USERNAME: ${{ secrets.ARTIFACTORY_USERNAME }} + OPENSHIFT_SERVER: ${{ secrets.OPENSHIFT_SERVER }} + OPENSHIFT_TOKEN: ${{ secrets.OPENSHIFT_TOKEN }} + DEPLOY_ENV: ${{ inputs.env }} + APP_NAME: ${{ inputs.app_name }} + SOURCE_NAMESPACE: ${{ format('{0}-{1}', secrets.OPENSHIFT_NAMESPACE_PLATE, inputs.env) }} + IMAGE_STREAM: ${{ inputs.image_stream }} + IMAGE_TAG: ${{ inputs.image_tag }} + + steps: + - name: Install skopeo + run: | + sudo apt-get update + sudo apt-get install -y skopeo + + - name: Login to Artifactory + run: | + set -euo pipefail + if [ -z "${ARTIFACTORY_REPO}" ]; then + echo "ARTIFACTORY_REPO is empty; set it to the Artifactory repo key (e.g. docker-local)." >&2 + exit 1 + fi + if [[ "${ARTIFACTORY_REPO}" == */* ]]; then + echo "ARTIFACTORY_REPO includes a path; using repo key '${ARTIFACTORY_REPO%%/*}' for API calls." >&2 + ARTIFACTORY_REPO_KEY="${ARTIFACTORY_REPO%%/*}" + else + ARTIFACTORY_REPO_KEY="${ARTIFACTORY_REPO}" + fi + echo "ARTIFACTORY_REPO_KEY=${ARTIFACTORY_REPO_KEY}" >> "${GITHUB_ENV}" + AUTH_CURL_FLAGS=(-H "X-JFrog-Art-Api: ${ARTIFACTORY_API_KEY}") + AUTH_PASSWORD="${ARTIFACTORY_API_KEY}" + PING_URL="https://${ARTIFACTORY_REGISTRY}/artifactory/api/system/ping" + REPO_URL="https://${ARTIFACTORY_REGISTRY}/artifactory/api/repositories/${ARTIFACTORY_REPO_KEY}" + if ! 
curl -fsS "${AUTH_CURL_FLAGS[@]}" "${PING_URL}"; then + echo "Artifactory ping failed: ${PING_URL}" + exit 1 + fi + REPO_HTTP_STATUS="$(curl -sS -o /tmp/artifactory-repo-info.txt -w "%{http_code}" \ + "${AUTH_CURL_FLAGS[@]}" "${REPO_URL}")" + if [ "${REPO_HTTP_STATUS}" -ge 200 ] && [ "${REPO_HTTP_STATUS}" -lt 300 ]; then + REPO_INFO="$(cat /tmp/artifactory-repo-info.txt)" + else + echo "Artifactory repo query failed: ${REPO_URL} (HTTP ${REPO_HTTP_STATUS})" + cat /tmp/artifactory-repo-info.txt + exit 1 + fi + echo "${REPO_INFO}" | tr -d '\n' | grep -Eq '"packageType"[[:space:]]*:[[:space:]]*"docker"' || \ + { echo "Repo ${ARTIFACTORY_REPO} is not a Docker repo."; exit 1; } + echo "${REPO_INFO}" | tr -d '\n' | grep -Eq '"rclass"[[:space:]]*:[[:space:]]*"(local|virtual|federated)"' || \ + { echo "Repo ${ARTIFACTORY_REPO} must be local or virtual or federated."; exit 1; } + skopeo login --authfile /tmp/artifactory-auth.json \ + --username "${ARTIFACTORY_USERNAME}" \ + --password "${AUTH_PASSWORD}" \ + "${ARTIFACTORY_REGISTRY}" + + - name: Restore oc command from Cache + uses: actions/cache@v4.2.0 + with: + path: /usr/local/bin/oc + key: oc-cli-${{ runner.os }} + + - name: Log in to Openshift + uses: redhat-actions/oc-login@v1.3 + with: + openshift_server_url: ${{ env.OPENSHIFT_SERVER }} + openshift_token: ${{ env.OPENSHIFT_TOKEN }} + insecure_skip_tls_verify: true + namespace: ${{ env.SOURCE_NAMESPACE }} + + - name: Copy OpenShift image to Artifactory + run: | + set -euo pipefail + REPO_REF="docker://${ARTIFACTORY_REGISTRY}/${ARTIFACTORY_REPO_KEY}/${APP_NAME}/${DEPLOY_ENV}/${IMAGE_STREAM}" + echo "Fetching tags for ${APP_NAME}/${DEPLOY_ENV}/${IMAGE_STREAM}..." + if TAGS_JSON="$(skopeo list-tags --authfile /tmp/artifactory-auth.json "${REPO_REF}")"; then + export TAGS_JSON + TAGS="$(python3 - <<'PY' + import json + import os + import sys + + raw = os.environ.get("TAGS_JSON", "") + try: + data = json.loads(raw) + except json.JSONDecodeError: + print("::error::Failed to parse tag list JSON from skopeo.", file=sys.stderr) + sys.exit(1) + + tags = data.get("Tags") + if not isinstance(tags, list): + print("::error::Tag list response missing Tags array.", file=sys.stderr) + sys.exit(1) + + print("\n".join(tag for tag in tags if tag)) + PY + )" + if [ -z "${TAGS// }" ]; then + echo "No tags found for ${APP_NAME}/${DEPLOY_ENV}/${IMAGE_STREAM}." + else + echo "Deleting tags for ${APP_NAME}/${DEPLOY_ENV}/${IMAGE_STREAM}..." + echo "${TAGS}" | while IFS= read -r tag; do + [ -z "${tag}" ] && continue + skopeo delete --authfile /tmp/artifactory-auth.json "${REPO_REF}:${tag}" + done + fi + else + echo "Repository ${APP_NAME}/${DEPLOY_ENV}/${IMAGE_STREAM} not found; skipping tag cleanup." 
+ fi + + OPENSHIFT_REGISTRY="$(oc registry info --public)" + oc registry login --registry="${OPENSHIFT_REGISTRY}" --to=/tmp/openshift-auth.json + + skopeo copy --src-tls-verify=false --dest-tls-verify=true \ + --src-authfile /tmp/openshift-auth.json --dest-authfile /tmp/artifactory-auth.json \ + "docker://${OPENSHIFT_REGISTRY}/${SOURCE_NAMESPACE}/${IMAGE_STREAM}:${IMAGE_TAG}" \ + "${REPO_REF}:${IMAGE_TAG}" diff --git a/.github/workflows/replace-DC.yaml b/.github/workflows/replace-DC.yaml new file mode 100644 index 00000000..166331f0 --- /dev/null +++ b/.github/workflows/replace-DC.yaml @@ -0,0 +1,115 @@ +## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly + +name: CTHUB Replace DC on Dev + +on: + push: + branches: [ replace-dc-0.3.0 ] + workflow_dispatch: + +env: + VERSION: 0.3.0 + GIT_URL: https://github.com/bcgov/cthub.git + TOOLS_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools + DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev + + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + + set-pre-release: + name: Calculate pre-release number + runs-on: ubuntu-latest + + outputs: + output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }} + + steps: + - id: set-pre-release + run: echo "PRE_RELEASE=$(date +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT + + build: + + name: Build CTHUB + runs-on: ubuntu-latest + needs: set-pre-release + timeout-minutes: 60 + + env: + PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + + steps: + + - name: Check out repository + uses: actions/checkout@v4.1.1 + + - name: Log in to Openshift + uses: redhat-actions/oc-login@v1.3 + with: + openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} + openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} + insecure_skip_tls_verify: true + namespace: ${{ env.TOOLS_NAMESPACE }} + + - name: Build CTHUB Backend + run: | + cd openshift/templates/backend + oc process -f ./backend-bc-docker.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + sleep 5s + oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} + + deploy: + + name: Deploy CTHUB on Dev + runs-on: ubuntu-latest + timeout-minutes: 60 + needs: [set-pre-release, build] + + env: + PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + + steps: + + - name: Checkout Manifest repository + uses: actions/checkout@v4.1.1 + with: + repository: bcgov-c/tenant-gitops-30b186 + ref: main + ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} + + - name: Update tags + uses: mikefarah/yq@v4.40.5 + with: + cmd: | + yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + + # - name: GitHub Commit & Push + # run: | + # git config --global user.email "actions@github.com" + # git config --global user.name "GitHub Actions" + # git add cthub/values-dev.yaml + # git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on Dev" + # git push + + - name: Log in to Openshift + uses: redhat-actions/oc-login@v1.3 + with: + openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} + 
openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} + insecure_skip_tls_verify: true + namespace: ${{ env.TOOLS_NAMESPACE }} + + - name: Tag and deploy to Prod + run: | + helm -n ${{ env.PROD_NAMESPACE }} list + oc tag ${{ env.DEV_NAMESPACE }}/tfrs-backend:${{ env.BUILD_SUFFIX }} ${{ env.PROD_NAMESPACE }}/tfrs-backend:${{ env.BUILD_SUFFIX }} + + - name: Helm Deployment + run: | + cd tfrs/charts/backend + helm -n ${{ env.PROD_NAMESPACE }} -f ./values-dev.yaml upgrade --install cthub-dev-backend . \ + --set podAnnotations.rolloutTriggered="A$(date +%s)E" diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml index 390f7992..f8c0adbb 100644 --- a/.github/workflows/test-ci.yaml +++ b/.github/workflows/test-ci.yaml @@ -1,23 +1,37 @@ ## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly -name: CTHUB 0.4.0 Test CI +name: CTHUB 0.5.0 Test CI on: workflow_dispatch: env: - VERSION: 0.4.0 - GIT_URL: https://github.com/bcgov/cthub.git + VERSION: 0.5.0 + GIT_URL: https://github.com/bcgov/cthub.git DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev TEST_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test +permissions: + contents: read + issues: write concurrency: group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true + cancel-in-progress: true jobs: + set-version: + name: Parse version from branch + runs-on: ubuntu-latest + outputs: + version: ${{ steps.set-version.outputs.VERSION }} + steps: + - id: set-version + run: | + echo "VERSION=${{ env.VERSION }}" >> "$GITHUB_OUTPUT" + install-oc: runs-on: ubuntu-latest + needs: [set-version] outputs: cache-hit: ${{ steps.cache.outputs.cache-hit }} steps: @@ -28,7 +42,7 @@ jobs: id: cache uses: actions/cache@v4.2.0 with: - path: /usr/local/bin/oc # Path where the `oc` binary will be installed + path: /usr/local/bin/oc # Path where the `oc` binary will be installed key: oc-cli-${{ runner.os }} - name: Install OpenShift CLI (if not cached) @@ -46,7 +60,7 @@ jobs: name: Find Dev deployment pre-release number runs-on: ubuntu-latest needs: [install-oc] - + outputs: output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }} @@ -70,7 +84,6 @@ jobs: echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deployment/cthub-dev-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT deploy: - name: Deploy CTHUB on Test runs-on: ubuntu-latest timeout-minutes: 60 @@ -86,8 +99,8 @@ jobs: secret: ${{ github.TOKEN }} approvers: emi-hi,kuanfandevops,tim738745,JulianForeman minimum-approvals: 1 - issue-title: "CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} Test Deployment" - + issue-title: "CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} Test Deployment" + - name: Restore oc command from Cache uses: actions/cache@v4.2.0 with: @@ -100,7 +113,7 @@ jobs: openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} insecure_skip_tls_verify: true - namespace: ${{ env.DEV_NAMESPACE }} + namespace: ${{ env.DEV_NAMESPACE }} - name: Tag CTHUB images to Test run: | @@ -108,22 +121,22 @@ jobs: oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} oc tag ${{ env.DEV_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} oc tag ${{ env.DEV_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE 
}} ${{ env.TEST_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - + - name: Checkout Manifest repository uses: actions/checkout@v4.1.1 with: repository: bcgov-c/tenant-gitops-30b186 ref: main ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} - + - name: Update frontend tag uses: mikefarah/yq@v4.40.5 with: - cmd: | - yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml - yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml - yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml - yq -i '.vinpower.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml + cmd: | + yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml + yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml + yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml + yq -i '.vinpower.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml - name: GitHub Commit & Push run: | @@ -132,4 +145,22 @@ jobs: git add cthub/values-test.yaml git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on Test" git push - \ No newline at end of file + + push-to-artifactory: + name: Push images to Artifactory (${{ matrix.image_stream }}) + needs: [set-version, set-pre-release, deploy] + strategy: + matrix: + image_stream: + - cthub-backend + - cthub-frontend + - cthub-task-queue + - cthub-vinpower + uses: ./.github/workflows/push-images-to-artifactory.yaml + with: + env: test + app_name: cthub + image_stream: ${{ matrix.image_stream }} + image_tag: ${{ needs.set-version.outputs.version }}-${{ needs.set-pre-release.outputs.output1 }} + secrets: inherit + diff --git a/.pipeline/.nvmrc b/.pipeline/.nvmrc deleted file mode 100644 index 6b12bc74..00000000 --- a/.pipeline/.nvmrc +++ /dev/null @@ -1 +0,0 @@ -v10.15.2 \ No newline at end of file diff --git a/.pipeline/build-metabase.js b/.pipeline/build-metabase.js deleted file mode 100755 index 35935c29..00000000 --- a/.pipeline/build-metabase.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; -const task = require('./lib/build-metabase.js') -const settings = require('./lib/config.js') - -task(Object.assign(settings, { phase: 'build'})) diff --git a/.pipeline/build-patroni.js b/.pipeline/build-patroni.js deleted file mode 100755 index f9dbb87d..00000000 --- a/.pipeline/build-patroni.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; -const task = require('./lib/build-patroni.js') -const settings = require('./lib/config.js') - -task(Object.assign(settings, { phase: 'build'})) diff --git a/.pipeline/build.js b/.pipeline/build.js deleted file mode 100755 index 3ac899f8..00000000 --- a/.pipeline/build.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; -const task = require('./lib/build.js') -const settings = require('./lib/config.js') - -task(Object.assign(settings, { phase: 'build'})) diff --git a/.pipeline/clean-tools.js b/.pipeline/clean-tools.js deleted file mode 100755 index 42f4c43e..00000000 --- a/.pipeline/clean-tools.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/clean-tools.js') - -task(Object.assign(settings, { phase: settings.options.env})); diff --git a/.pipeline/clean.js b/.pipeline/clean.js deleted file mode 100755 index 42231d7f..00000000 --- a/.pipeline/clean.js +++ /dev/null @@ 
-1,5 +0,0 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/clean.js') - -task(Object.assign(settings, { phase: settings.options.env})); diff --git a/.pipeline/deploy-knp.js b/.pipeline/deploy-knp.js deleted file mode 100755 index ccbc4048..00000000 --- a/.pipeline/deploy-knp.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/deploy-knp.js') - -task(Object.assign(settings, { phase: settings.options.env})); diff --git a/.pipeline/deploy-metabase.js b/.pipeline/deploy-metabase.js deleted file mode 100755 index 02d551f0..00000000 --- a/.pipeline/deploy-metabase.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/deploy-metabase.js') - -task(Object.assign(settings, { phase: settings.options.env})); diff --git a/.pipeline/deploy-patroni.js b/.pipeline/deploy-patroni.js deleted file mode 100755 index 348313cd..00000000 --- a/.pipeline/deploy-patroni.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/deploy-patroni.js') - -task(Object.assign(settings, { phase: settings.options.env})); diff --git a/.pipeline/deploy-unittest.js b/.pipeline/deploy-unittest.js deleted file mode 100644 index c6c95ded..00000000 --- a/.pipeline/deploy-unittest.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/deploy-unittest.js') - -task(Object.assign(settings, { phase: settings.options.env})); diff --git a/.pipeline/deploy.js b/.pipeline/deploy.js deleted file mode 100755 index 59550945..00000000 --- a/.pipeline/deploy.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict'; -const settings = require('./lib/config.js') -const task = require('./lib/deploy.js') - -task(Object.assign(settings, { phase: settings.options.env})); diff --git a/.pipeline/lib/build-metabase.js b/.pipeline/lib/build-metabase.js deleted file mode 100755 index 6ac832d9..00000000 --- a/.pipeline/lib/build-metabase.js +++ /dev/null @@ -1,24 +0,0 @@ -"use strict"; -const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); -const path = require("path"); - -module.exports = settings => { - const phases = settings.phases; - const options = settings.options; - const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options)); - const phase = "build"; - let objects = []; - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); - - // The building of your cool app goes here ▼▼▼ - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/metabase/metabase-bc.yaml`, {})); - - oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - phases[phase].changeId, - phases[phase].instance, - ); - oc.applyAndBuild(objects); -}; diff --git a/.pipeline/lib/build-patroni.js b/.pipeline/lib/build-patroni.js deleted file mode 100755 index 5ea254bf..00000000 --- a/.pipeline/lib/build-patroni.js +++ /dev/null @@ -1,24 +0,0 @@ -"use strict"; -const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); -const path = require("path"); - -module.exports = settings => { - const phases = settings.phases; - const options = settings.options; - const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options)); - const phase = "build"; - let objects = []; - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, 
"../../openshift")); - - // The building of your cool app goes here ▼▼▼ - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/patroni-2.1.1/templates/build.yaml`, {})); - - oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - phases[phase].changeId, - phases[phase].instance, - ); - oc.applyAndBuild(objects); -}; diff --git a/.pipeline/lib/build.js b/.pipeline/lib/build.js deleted file mode 100755 index c9fe35d1..00000000 --- a/.pipeline/lib/build.js +++ /dev/null @@ -1,45 +0,0 @@ -"use strict"; -const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); -const path = require("path"); - -module.exports = settings => { - const phases = settings.phases; - const options = settings.options; - const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options)); - const phase = "build"; - let objects = []; - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); - - // The building of your cool app goes here ▼▼▼ - - // build frontend - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-bc.yaml`, { - 'param':{ - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'VERSION': phases[phase].tag, - 'GIT_URL': oc.git.http_url, - 'GIT_REF': oc.git.ref - } - })) - - //build backend - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/backend/backend-bc.yaml`, { - 'param':{ - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'VERSION': phases[phase].tag, - 'GIT_URL': oc.git.http_url, - 'GIT_REF': oc.git.ref - } - })) - - oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - phases[phase].changeId, - phases[phase].instance, - ); - oc.applyAndBuild(objects); -}; diff --git a/.pipeline/lib/clean-tools.js b/.pipeline/lib/clean-tools.js deleted file mode 100755 index 0521bd6f..00000000 --- a/.pipeline/lib/clean-tools.js +++ /dev/null @@ -1,64 +0,0 @@ -"use strict"; -const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); - -// The clean tasks should be based on the following five labels added by BCDK pipeline -// namespace: '30b186-tools', -// transient: true, -// name: 'cthub', -// phase: 'build', -// changeId: '46', -// suffix: '-build-46', -// instance: 'cthub-build-46', -// version: '1.0.0-46', -// tag: 'build-1.0.0-46', -// ocpName: 'apps.silver.devops' - -const getTargetPhases = (env, phases) => { - let target_phase = []; - for (const phase in phases) { - if (env.match(/^(all|transient)$/) && phases[phase].transient) { - target_phase.push(phase); - } else if (env === phase) { - target_phase.push(phase); - break; - } - } - - return target_phase; -}; - -module.exports = settings => { - const phases = settings.phases; - const options = settings.options; - const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options)); - const target_phases = getTargetPhases(options.env, phases); - - target_phases.forEach(k => { - if (phases.hasOwnProperty(k)) { - - const phase = phases[k]; - oc.namespace(phase.namespace); - - let buildConfigs = oc.get("bc", { - selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, - namespace: phase.namespace, - }); - - buildConfigs.forEach(bc => { - if (bc.spec.output.to.kind == "ImageStreamTag") { - oc.delete([`ImageStreamTag/${bc.spec.output.to.name}`], { - "ignore-not-found": "true", - wait: "true", - namespace: 
phase.namespace, - }); - } - oc.delete([`BuildConfig/${bc.metadata.name}`], { - "ignore-not-found": "true", - wait: "true", - namespace: phase.namespace, - }); - }); - - } - }); -}; diff --git a/.pipeline/lib/clean.js b/.pipeline/lib/clean.js deleted file mode 100755 index 7ec61c13..00000000 --- a/.pipeline/lib/clean.js +++ /dev/null @@ -1,132 +0,0 @@ -"use strict"; -const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); - -// The clean tasks should be based on the following five labels added by BCDK pipeline -// app: cthub-dev-45 -// template-hash: 5ee0ba9e32efa8ac4d0ed2b9923ea2be3ddda2f4 -// github-owner: bcgov -// env-name: dev -// app.kubernetes.io/component: database -// app.kubernetes.io/managed-by: template -// app-name: cthub -// app.kubernetes.io/name: patroni -// env-id: '45' -// github-repo: cthub - -const getTargetPhases = (env, phases) => { - let target_phase = []; - for (const phase in phases) { - if (env.match(/^(all|transient)$/) && phases[phase].transient) { - target_phase.push(phase); - } else if (env === phase) { - target_phase.push(phase); - break; - } - } - - return target_phase; -}; - -module.exports = settings => { - const phases = settings.phases; - const options = settings.options; - const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options)); - const target_phases = getTargetPhases(options.env, phases); - - target_phases.forEach(k => { - - //k is dve, test or prod - if (phases.hasOwnProperty(k)) { - - const phase = phases[k]; - oc.namespace(phase.namespace); - - let deploymentConfigs = oc.get("dc", { - selector: `app=${phase.instance},env-id=${phase.changeId},env-name=${k},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, - namespace: phase.namespace, - }); - deploymentConfigs.forEach(dc => { - dc.spec.triggers.forEach(trigger => { - if ( - trigger.type == "ImageChange" && - trigger.imageChangeParams.from.kind == "ImageStreamTag" - ) { - oc.delete([`ImageStreamTag/${trigger.imageChangeParams.from.name}`], { - "ignore-not-found": "true", - wait: "true", - namespace: phase.namespace, - }); - } - }); - oc.delete([`DeploymentConfig/${dc.metadata.name}`], { - "ignore-not-found": "true", - wait: "true", - namespace: phase.namespace, - }); - }); - oc.raw( - "delete", - ["Secret,configmap,endpoints,RoleBinding,role,ServiceAccount,Endpoints,service,route"], - { - selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, - wait: "true", - namespace: phase.namespace, - } - ); - - //get all statefulsets before they are deleted - const statefulsets = oc.get("statefulset", { - selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, - namespace: phase.namespace, - }); - //remove all the PVCs associated with each statefulset, after they get deleted by above delete all operation - statefulsets.forEach(statefulset => { - //delete StatefulSet - oc.delete([`StatefulSet/${statefulset.metadata.name}`], { - "ignore-not-found": "true", - wait: "true", - namespace: phase.namespace, - }); - //delete configmaps create by patroni - let patroniConfigmaps = oc.get("configmap", { - selector: `app.kubernetes.io/name=patroni,cluster-name=${statefulset.metadata.name}`, - namespace: phase.namespace, - }); - if(Object.entries(patroniConfigmaps).length > 0) { - oc.raw( - "delete", - ["configmap"], - { - selector: `app.kubernetes.io/name=patroni,cluster-name=${statefulset.metadata.name}`, - wait: 
"true", - "ignore-not-found": "true", - namespace: phase.namespace, - }, - ); - }; - //delete PVCs mounted for statfulset - oc.raw("delete", ["pvc"], { - selector: `app=${phase.instance},statefulset=${statefulset.metadata.name},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, - "ignore-not-found": "true", - wait: "true", - namespace: phase.namespace, - }); - - }); - - //remove all PR's network policies - const knps = oc.get("networkpolicies", { - selector: `app=${phase.instance},env-id=${phase.changeId},env-name=${k},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, - namespace: phase.namespace, - }); - knps.forEach(knp => { - oc.delete([`networkpolicy/${knp.metadata.name}`], { - "ignore-not-found": "true", - wait: "true", - namespace: phase.namespace, - }); - }); - - } - }); -}; diff --git a/.pipeline/lib/config.js b/.pipeline/lib/config.js deleted file mode 100644 index b03f1886..00000000 --- a/.pipeline/lib/config.js +++ /dev/null @@ -1,82 +0,0 @@ -'use strict'; -const options= require('@bcgov/pipeline-cli').Util.parseArguments() -const changeId = options.pr //aka pull-request -const version = '0.2.0' -const name = 'cthub' -const ocpName = 'apps.silver.devops' - -//if work directly on bcgov repo, the value is bcgov -//if work on forked developer repo, the value is the developer's GitHub Id -//without this line of code, the pr deployment cann't removed when the pr is closed - -/* -Resource usage base: - -Name CPU Memory -backend 40m 520M -frontend 70m 500M -superset 1m 180M -metabase 130m 820M -patroni 70m 410M -redis 2m 20M -Minio 3m 150M -backup 1m 20M - -Set the cpu usage 20m as the lowest -Set the limit as two times of request - -*/ -options.git.owner='bcgov' -options.git.repository='cthub' - -const phases = { - - build: {namespace:'30b186-tools' , transient:true, name: `${name}`, phase: 'build', - changeId:`${changeId}`, suffix: `-build-${changeId}` , instance: `${name}-build-${changeId}`, - version:`${version}-${changeId}`, tag:`build-${version}-${changeId}`, ocpName: `${ocpName}`}, - - dev: {namespace:'30b186-dev', transient:true, name: `${name}`, ssoSuffix:'-dev', - ssoName:'dev.oidc.gov.bc.ca', phase: 'dev' , changeId:`${changeId}`, suffix: '-dev', - instance: `${name}-dev` , version:`${version}`, tag:`dev-${version}`, - host: `cthub-dev.${ocpName}.gov.bc.ca`, djangoDebug: 'True', logoutHostName: 'logontest7.gov.bc.ca', dbHost: 'cthub-crunchy-dev-pgbouncer', - metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, - frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, - backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendHost: `cthub-backend-dev.${ocpName}.gov.bc.ca`, backendReplicas: 1, - minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3Gi', minioBucketName: 'zevadv', - schemaspyCpuRequest: '50m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, - rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '1G', rabbitmqReplica: 1, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', - patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: 
'250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '2G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`}, - - test: {namespace:'30b186-test', name: `${name}`, ssoSuffix:'-test', - ssoName:'test.oidc.gov.bc.ca', phase: 'test' , changeId:`${changeId}`, suffix: `-test`, - instance: `${name}-test`, version:`${version}`, tag:`test-${version}`, - host: `cthub-test.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHostName: 'logontest7.gov.bc.ca', dbHost: 'cthub-crunchy-test-pgbouncer', - metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, - frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, - backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 1, backendMaxReplicas: 3, backendHost: `cthub-backend-test.${ocpName}.gov.bc.ca`, - minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', minioBucketName: 'zevats', - schemaspyCpuRequest: '20m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, - rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '700M', rabbitmqPvcSize: '1G', rabbitmqReplica: 2, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', - patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '5G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`}, - - prod: {namespace:'30b186-prod', name: `${name}`, ssoSuffix:'', - ssoName:'oidc.gov.bc.ca', phase: 'prod' , changeId:`${changeId}`, suffix: `-prod`, - instance: `${name}-prod`, version:`${version}`, tag:`prod-${version}`, - metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, - host: `cthub-prod.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHostName: 'logon7.gov.bc.ca', dbHost: 'patroni-master-prod', - frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, - backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 1, backendMaxReplicas: 3, backendHost: `cthub-backend-prod.${ocpName}.gov.bc.ca`, - minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', minioBucketName: 'zevapr', - schemaspyCpuRequest: '50m', schemaspyCpuLimit: '400m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, - rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '5G', rabbitmqReplica: 2, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', - patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '8G', patroniReplica: 3, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`} - -}; - 
-// This callback forces the node process to exit as failure. -process.on('unhandledRejection', (reason) => { - console.log(reason); - process.exit(1); -}); - -module.exports = exports = {phases, options}; diff --git a/.pipeline/lib/deploy-knp.js b/.pipeline/lib/deploy-knp.js deleted file mode 100755 index 6458596c..00000000 --- a/.pipeline/lib/deploy-knp.js +++ /dev/null @@ -1,34 +0,0 @@ -"use strict"; -const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); -const path = require("path"); - -module.exports = settings => { - const phases = settings.phases; - const options = settings.options; - const phase = options.env; - const changeId = phases[phase].changeId; - const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options)); - - //add Valid Redirect URIs for the pull request to keycloak - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); - var objects = []; - - // The deployment of your cool app goes here ▼▼▼ - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/knp/knp-env-pr.yaml`, { - 'param': { - 'SUFFIX': phases[phase].suffix, - 'ENVIRONMENT': phases[phase].phase - } - })) - - oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - `${changeId}`, - phases[phase].instance, - ); - oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); - oc.applyAndDeploy(objects, phases[phase].instance); - -}; diff --git a/.pipeline/lib/deploy-metabase.js b/.pipeline/lib/deploy-metabase.js deleted file mode 100755 index 16a5c424..00000000 --- a/.pipeline/lib/deploy-metabase.js +++ /dev/null @@ -1,39 +0,0 @@ -"use strict"; -const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); -const path = require("path"); -//const KeyCloakClient = require('./keycloak'); - -module.exports = settings => { - const phases = settings.phases; - const options = settings.options; - const phase = options.env; - const changeId = phases[phase].changeId; - const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options)); - - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); - var objects = []; - - // The deployment of your cool app goes here ▼▼▼ - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/metabase-postgresql/metabase-dc.yaml`, { - 'param': { - 'ENV_NAME': phases[phase].phase, - 'SUFFIX': phases[phase].suffix, - 'CPU_REQUEST': phases[phase].metabaseCpuRequest, - 'CPU_LIMIT': phases[phase].metabaseCpuLimit, - 'MEMORY_REQUEST': phases[phase].metabaseMemoryRequest, - 'MEMORY_LIMIT': phases[phase].metabaseMemoryLimit, - 'REPLICAS': phases[phase].metabaseReplicas, - } - })) - - oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - `${changeId}`, - phases[phase].instance, - ); - oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); - oc.applyAndDeploy(objects, phases[phase].instance); - -}; diff --git a/.pipeline/lib/deploy-patroni.js b/.pipeline/lib/deploy-patroni.js deleted file mode 100755 index e7eb5f97..00000000 --- a/.pipeline/lib/deploy-patroni.js +++ /dev/null @@ -1,54 +0,0 @@ -"use strict"; -const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); -const path = require("path"); -//const KeyCloakClient = require('./keycloak'); - -module.exports = settings => { - const phases = settings.phases; - const options = settings.options; - const phase = options.env; - const changeId = 
phases[phase].changeId; - const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options)); - - //add Valid Redirect URIs for the pull request to keycloak - /************ - if(phase === 'dev') { - const kc = new KeyCloakClient(settings, oc); - kc.addUris(); - } - *************/ - - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); - var objects = []; - - // The deployment of your cool app goes here ▼▼▼ - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/patroni-2.1.1/templates/prerequisite.yaml`, { - 'param': { - 'SUFFIX': phases[phase].suffix - } - })) - - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/patroni-2.1.1/templates/deploy.yaml`, { - 'param': { - 'SUFFIX': phases[phase].suffix, - 'CPU_REQUEST': phases[phase].patroniCpuRequest, - 'CPU_LIMIT': phases[phase].patroniCpuLimit, - 'MEMORY_REQUEST': phases[phase].patroniMemoryRequest, - 'MEMORY_LIMIT': phases[phase].patroniMemoryLimit, - 'REPLICAS': phases[phase].patroniReplica, - 'PVC_SIZE': phases[phase].patroniPvcSize, - 'STORAGE_CLASS': phases[phase].storageClass - } - })) - - oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - `${changeId}`, - phases[phase].instance, - ); - oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); - oc.applyAndDeploy(objects, phases[phase].instance); - -}; diff --git a/.pipeline/lib/deploy-unittest.js b/.pipeline/lib/deploy-unittest.js deleted file mode 100644 index 93e01e50..00000000 --- a/.pipeline/lib/deploy-unittest.js +++ /dev/null @@ -1,67 +0,0 @@ -"use strict"; -const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); -const path = require("path"); - -module.exports = settings => { - const phases = settings.phases; - const options = settings.options; - const phase = options.env; - const changeId = phases[phase].changeId; - const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options)); - - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); - var objects = []; - - // The deployment of your cool app goes here ▼▼▼ - - //deploy separate database and backend pod for unit test - if( phase === 'dev' ) { - - //create unit test database init scripts - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/unittest/zeva-postgresql-init.yaml`, { - 'param': { - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix - } - })) - - //deploy postgresql unit test - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/unittest/postgresql-dc-unittest.yaml`, { - 'param': { - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'ENV_NAME': phases[phase].phase - } - })) - - //deploy backend unit test - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/unittest/backend-dc-unittest.yaml`, { - 'param': { - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'VERSION': phases[phase].tag, - 'ENV_NAME': phases[phase].phase, - 'BACKEND_HOST_NAME': phases[phase].backendHost, - 'RABBITMQ_CLUSTER_NAME': 'rabbitmq-cluster', - 'CPU_REQUEST': phases[phase].backendCpuRequest, - 'CPU_LIMIT': '700m', - 'MEMORY_REQUEST': phases[phase].backendMemoryRequest, - 'MEMORY_LIMIT': phases[phase].backendMemoryLimit, - 'HEALTH_CHECK_DELAY': phases[phase].backendHealthCheckDelay, - 'REPLICAS': phases[phase].backendReplicas - } - 
})) - - } - - oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - `${changeId}`, - phases[phase].instance, - ); - oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); - oc.applyAndDeploy(objects, phases[phase].instance); - -}; diff --git a/.pipeline/lib/deploy.js b/.pipeline/lib/deploy.js deleted file mode 100755 index 8b5b0f54..00000000 --- a/.pipeline/lib/deploy.js +++ /dev/null @@ -1,69 +0,0 @@ -"use strict"; -const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); -const path = require("path"); -//const KeyCloakClient = require('./keycloak'); - -module.exports = settings => { - const phases = settings.phases; - const options = settings.options; - const phase = options.env; - const changeId = phases[phase].changeId; - const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options)); - - //add Valid Redirect URIs for the pull request to keycloak - /************ - if(phase === 'dev') { - const kc = new KeyCloakClient(settings, oc); - kc.addUris(); - } - *************/ - - const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift")); - var objects = []; - - // The deployment of your cool app goes here ▼▼▼ - - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-dc.yaml`, { - 'param': { - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'VERSION': phases[phase].tag, - 'ENV_NAME': phases[phase].phase, - 'HOST_NAME': phases[phase].host, - 'CPU_REQUEST': phases[phase].frontendCpuRequest, - 'CPU_LIMIT': phases[phase].frontendCpuLimit, - 'MEMORY_REQUEST': phases[phase].frontendMemoryRequest, - 'MEMORY_LIMIT': phases[phase].frontendMemoryLimit, - 'REPLICAS': phases[phase].frontendReplicas - } - })) - - objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/backend/backend-dc.yaml`, { - 'param': { - 'NAME': phases[phase].name, - 'SUFFIX': phases[phase].suffix, - 'VERSION': phases[phase].tag, - 'ENV_NAME': phases[phase].phase, - 'BACKEND_HOST_NAME': phases[phase].backendHost, - 'CPU_REQUEST': phases[phase].backendCpuRequest, - 'CPU_LIMIT': phases[phase].backendCpuLimit, - 'MEMORY_REQUEST': phases[phase].backendMemoryRequest, - 'MEMORY_LIMIT': phases[phase].backendMemoryLimit, - 'HEALTH_CHECK_DELAY': phases[phase].backendHealthCheckDelay, - 'REPLICAS': phases[phase].backendReplicas, - 'DB_HOST': phases[phase].dbHost, - 'MINIO_BUCKET_NAME': phases[phase].minioBucketName - } - })) - - oc.applyRecommendedLabels( - objects, - phases[phase].name, - phase, - `${changeId}`, - phases[phase].instance, - ); - oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); - oc.applyAndDeploy(objects, phases[phase].instance); - -}; diff --git a/.pipeline/lib/keycloak.js b/.pipeline/lib/keycloak.js deleted file mode 100644 index 5310a10b..00000000 --- a/.pipeline/lib/keycloak.js +++ /dev/null @@ -1,137 +0,0 @@ -"use strict"; -const axios = require("axios"); -const _ = require("lodash"); -//code reference https://github.com/bcgov/HMCR/blob/0.7/.pipeline/lib/keycloak.js -module.exports = class KeyCloakClient { - constructor(settings, oc) { - this.phases = settings.phases; - this.options = settings.options; - this.oc = oc; - this.zevaHost = this.phases.dev.host; - } - - async init() { - - this.getSecrets(); - - this.apiTokenPath = `/auth/realms/${this.realmId}/protocol/openid-connect/token`; - this.zevaPublicClientPath = 
`auth/admin/realms/${this.realmId}/clients/${this.zevaClientId}`; - - this.api = axios.create({ - baseURL: `https://${this.ssoHost}` - }); - - const token = await this.getAccessToken(); - - this.api.defaults.headers.common = { - Authorization: `Bearer ${token}` - }; - } - - getSecrets() { - const keycloakSecret = this.oc.raw("get", [ - "secret", - "zeva-keycloak", - "-o", - "json" - ]); - const secret = JSON.parse(keycloakSecret.stdout).data; - - this.clientId = Buffer.from(secret.clientId, "base64").toString(); - this.clientSecret = Buffer.from(secret.clientSecret, "base64").toString(); - this.zevaClientId = Buffer.from(secret.zevaPublic, "base64").toString(); - this.realmId = Buffer.from(secret.realmId, "base64").toString(); - this.ssoHost = Buffer.from(secret.host, "base64").toString(); - - if (!this.clientId || !this.clientSecret || !this.zevaClientId) - throw new Error( - "Unable to retrieve Keycloak service account info from OpenShift" - ); - } - - getAccessToken() { - - return this.api - .post(this.apiTokenPath, "grant_type=client_credentials", { - headers: { "Content-Type": "application/x-www-form-urlencoded" }, - auth: { - username: this.clientId, - password: this.clientSecret - } - }) - .then(function(response) { - if (!response.data.access_token) - throw new Error( - "Unable to retrieve Keycloak service account access token" - ); - - return Promise.resolve(response.data.access_token); - }); - } - - async getUris() { - - const response = await this.api.get(this.zevaPublicClientPath); - - const data = { ...response.data }; - const redirectUris = data.redirectUris; - - return { data, redirectUris }; - } - - async addUris() { - await this.init(); - - console.log("Attempting to add RedirectUri and WebOrigins"); - - const { data, redirectUris} = await this.getUris(); - - const putData = { id: data.id, clientId: data.clientId }; - - const hasRedirectUris = redirectUris.find(item => - item.includes(this.zevaHost) - ); - - if (!hasRedirectUris) { - redirectUris.push(`https://${this.zevaHost}/*`); - putData.redirectUris = redirectUris; - } - - if (!(hasRedirectUris)) { - this.api - .put(this.zevaPublicClientPath, putData) - .then(() => console.log("RedirectUri and WebOrigins added.")); - } else { - console.log("RedirectUri and WebOrigins add skipped."); - } - } - - async removeUris() { - await this.init(); - - console.log("Attempting to remove RedirectUri and WebOrigins"); - - const { data, redirectUris } = await this.getUris(); - - const putData = { id: data.id, clientId: data.clientId }; - - const hasRedirectUris = redirectUris.find(item => - item.includes(this.zevaHost) - ); - - if (hasRedirectUris) { - putData.redirectUris = redirectUris.filter( - item => !item.includes(this.zevaHost) - ); - } - - if (hasRedirectUris) { - this.api - .put(this.zevaPublicClientPath, putData) - .then(() => console.log("RedirectUri and WebOrigins removed.")); - } else { - console.log("RedirectUri and WebOrigins remove skipped."); - } - - } -}; diff --git a/.pipeline/npmw b/.pipeline/npmw deleted file mode 100755 index 1eed7c95..00000000 --- a/.pipeline/npmw +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh -set +x -type -t nvm && nvm deactivate -export NVM_DIR="$(git rev-parse --show-toplevel)/.nvm" -if [ ! 
-f "$NVM_DIR/nvm.sh" ]; then - mkdir -p "${NVM_DIR}" - curl -sSL -o- https://raw.githubusercontent.com/creationix/nvm/v0.34.0/install.sh | bash &>/dev/null -fi -source "$NVM_DIR/nvm.sh" &>/dev/null -METHOD=script nvm install --no-progress &>/dev/null -nvm use &>/dev/null -exec npm "$@" diff --git a/.pipeline/package.json b/.pipeline/package.json deleted file mode 100644 index 1e097da3..00000000 --- a/.pipeline/package.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "pipeline", - "version": "1.0.0", - "description": "This a pipeliene script", - "engines": { - "node": ">=8" - }, - "scripts": { - "build": "node build.js", - "build-metabase": "node build-metabase.js", - "build-patroni": "node build-patroni.js", - "clean-tools": "node clean-tools.js", - "clean": "node clean.js", - "deploy": "node deploy.js", - "deploy-unittest": "node deploy-unittest.js", - "deploy-metabase": "node deploy-metabase.js", - "deploy-patroni": "node deploy-patroni.js", - "deploy-knp": "node deploy-knp.js", - "version": "echo \"node@$(node --version) ($(which node))\" && echo \"npm@$(npm --version) ($(which npm))\" && npm ls" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/bcgov/ocp-sso.git" - }, - "author": "", - "license": "Apache-2.0", - "dependencies": { - "@bcgov/gh-deploy": "^1.1.4", - "@bcgov/pipeline-cli": "^1.0.1", - "axios": "^0.21.1", - "lodash": "^4.17.21" - } -} diff --git a/README.md b/README.md index 7b1a67cf..302c71c9 100644 --- a/README.md +++ b/README.md @@ -1,38 +1,30 @@ # CTHUB The Clean Transportation Data Hub provides an evidence base for the Clean Transportation Branch through data storage, analysis and visualization, that improves decision making to increase energy efficiency and de-carbonise the transportation system. It aims to be the most comprehensive, reliable and accessible data management system for clean transportation in the world. 
-# Docker Instructions -- Make sure Docker is installed and running +# Podman Instructions +- Make sure Podman Desktop (https://podman-desktop.io/) is installed and running - In your terminal, go to your project folder and execute the following: - - ```docker-compose up``` + - ```podman compose up``` -## Useful Docker Commands +## Useful Podman Commands - To access postgres: - Go to your project folder in your terminal - Execute the following: - - ```docker-compose exec db psql -U postgres``` + - ```podman compose exec db psql -U postgres``` - Some notes about the structure of the command - - docker-compose exec - this is your standard command to execute something within the context of the docker-compose yml file + - podman compose exec - this is your standard command to execute something within the context of the docker-compose yml file - db - this is the service you want to execute your command in - psql -U postgres - execute psql with the default user of postgres - To access the backend: (to do migrations and other stuff with the backend) - Go to your project folder in your terminal - Execute the following: - - ```docker-compose exec api bash``` + - ```podman compose exec api bash``` - Here you can do your standard django stuff like: - ```python manage.py makemigrations``` - ```python manage.py migrate``` -- To access the frontend: (to install/update a package, etc) - - Go to your project folder in your terminal - - Execute the following: - - ```docker-compose exec web bash``` - - This is where you can make changes to your package.json - - You can technically make changes to your packages without going into your container, but you'll need npm installed into your system - - - - To run in testing mode if you don't have docker-compose-local-dev.yml locally, create a new file and add the contents from docker-compose plus a line for: @@ -40,7 +32,7 @@ The Clean Transportation Data Hub provides an evidence base for the Clean Transp in api environment to run using this docker file: - docker-compose -f docker-compose-local-dev.yml up + podman compose -f docker-compose-local-dev.yml up this ensures that the authentication skips the actual keycloak authentication and uses the user table to get permissions diff --git a/bcgovpubcode.yml b/bcgovpubcode.yml new file mode 100644 index 00000000..198fce08 --- /dev/null +++ b/bcgovpubcode.yml @@ -0,0 +1,45 @@ +--- +data_management_roles: + data_custodian: James Donald + product_owner: Daniel Clancy +product_external_dependencies: + identity_authorization: + - IDIR + notification_standard: [] +product_information: + api_specifications: [] + ministry: + - Energy, Mines and Low Carbon Innovation + product_acronym: CTHUB + product_description: >- + It provides an evidence base for the Clean Transportation Branch through + data storage, analysis and visualization, that improves decision making to + increase energy efficiency and de-carbonise the transportation system. 
+ product_name: Clean Transportation Data Hub + product_urls: [] + program_area: Clean Transportation Branch +product_technology_information: + backend_frameworks: + - name: Django + version: 3.2.25 + - name: Spring Boot + version: 3.2.1 + backend_languages_version: + - name: Python + version: 3.9.1 + ci_cd_tools: + - GitHub-Actions + data_storage_platforms: + - Postgresql + frontend_frameworks: + - name: React + version: 18.2.0 + - name: Node.js + version: 16.13 + frontend_languages: + - name: JavaScript + version: current + hosting_platforms: + - Private-Cloud-Openshift + other_tools: MinIO (current) +version: 1 diff --git a/charts/cthub-spilo/.helmignore b/charts/cthub-spilo/.helmignore deleted file mode 100644 index 0e8a0eb3..00000000 --- a/charts/cthub-spilo/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/charts/cthub-spilo/Chart.lock b/charts/cthub-spilo/Chart.lock deleted file mode 100644 index 97c1cd25..00000000 --- a/charts/cthub-spilo/Chart.lock +++ /dev/null @@ -1,6 +0,0 @@ -dependencies: -- name: spilo - repository: file://../spilo - version: 0.2.0 -digest: sha256:7511538016e34905d07c80deed4ac95eadf6e208c2f1e7036eebfcef7b296897 -generated: "2023-01-20T11:30:53.758009-08:00" diff --git a/charts/cthub-spilo/Chart.yaml b/charts/cthub-spilo/Chart.yaml deleted file mode 100644 index a7329f9a..00000000 --- a/charts/cthub-spilo/Chart.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: v2 -name: cthub-spilo -description: A Helm chart for setting up spilo for the cthub project on Openshift - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. 
-appVersion: "0.2.0" - -#repository: "https://bcgov.github.io/spilo-chart" - -dependencies: - - name: spilo - version: "0.2.0" - repository: "file://../spilo" \ No newline at end of file diff --git a/charts/cthub-spilo/Readme.md b/charts/cthub-spilo/Readme.md deleted file mode 100644 index 71d31279..00000000 --- a/charts/cthub-spilo/Readme.md +++ /dev/null @@ -1,82 +0,0 @@ -## Before running Helm -* Create secret cthub-patroni-admin - * Create the secret by using cthub/openshift/templates/spilo/cthub-patroni-admin.yaml, the three passwords are generated randomly - -* Create secret cthub-patroni-app - * Create the secret by using cthub/openshift-v4/templates/spilo/cthub-patroni-app.yaml, the three password fields must be in sync with the existing secret patroni-dev - * It contains: app-db-name, app-db-password, app-db-username, metabaseuser-name, metabaseuser-password - * The replication- and superuser- are not needed - * If this secret already exists, please verify the password fields - -* Create Object Storage secret for database continuous backup, cthub-object-storage - * Create the secret by using cthub/openshift-v4/templates/object-storage/object-storage-secret.yaml - * The secret should have been created, verify it by using CyberDuck - -* Create secret cthub-db-backup-s3 - * It includes AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_ENDPOINT - * The values are in sync with secret cthub-object-storage - -* Verify values-dev.yaml. Create the bucket on object storage if needed - -* Add new KNPs templates/knp/knp-env-pr-new-cthub-spilo.yaml - * oc process -f ./knp-env-pr-new-cthub-spilo.yaml ENVIRONMENT=test | oc apply -f - -n 30b186-dev - -## Helm command -helm install -n 30b186-dev -f ./values-dev.yaml cthub-spilo . -helm uninstall -n 30b186-dev cthub-spilo - -## Migrate Postgresql 10 on Patroni to 14 on Spilo container - -### Bring down the cthub application and route the frontend to maintenance mode - -### Run a final backup on backup container - -### Create cthub database user and database -* Login to the cthub-spilo leader pod -* If the username contains upper case letters, it should be double quoted - * create user for cthub database, the username should be the same as on v10, otherwise the restore may encounter issues - * create user [username] with password '[password]' - * The password can be found in secret cthub-patroni-app - * create cthub database - * create database cthub owner [username] ENCODING 'UTF8' LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8' -### Reset postgresql logging -* login to the cthub-spilo leader pod, run the following psql to keep only 24 hours of log files, otherwise they take too much space - ALTER SYSTEM SET log_filename='postgresql-%H.log'; - ALTER SYSTEM SET log_connections='off'; - ALTER SYSTEM SET log_disconnections='off'; - ALTER SYSTEM SET log_checkpoints='off'; - select pg_reload_conf(); -### Create metabase user -* login to the cthub-spilo leader pod - CREATE USER metabaseuser WITH PASSWORD 'xxxxxx'; - GRANT CONNECT ON DATABASE cthub TO metabaseuser; - GRANT USAGE ON SCHEMA public TO metabaseuser; - GRANT SELECT ON ALL TABLES IN SCHEMA public TO metabaseuser; - ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO metabaseuser; - verify permissions are granted: select * from information_schema.role_table_grants where grantee='metabaseuser'; - -## Backup the existing v10 database and restore to v14 cluster -* Make sure the application is stopped -* Login to patroni-dev leader pod - * make an empty dir /home/postgres/migration and cd into it - * 
backup cthub database: pg_dump cthub > cthub.sql -* Restore cthub database - * psql cthub < ./cthub.sql >> ./restore.log 2>&1 - * verify the restore.log when complete - -* Point the applications to the v14 cluster, update the environment variables for - * backend: DATABASE_SERVICE_NAME, POSTGRESQL_SERVICE_HOST - * celery: DATABASE_SERVICE_NAME - * scan-handler: DATABASE_SERVICE_NAME -* Bring down the v10 cluster -* Bring down the maintenance page -* Bring up the cthub application -* Update patroni backup to only backup minio data -* Update metabase connection from CTHUB -* Update dbServiceName to be cthub-spilo in .pipeline/lib/config.js - -## Notes for uninstalling cthub-spilo when needed -* After the helm uninstall command, remember to remove the following: - * The two configmaps: cthub-spilo-config, cthub-spilo-leader - * The PVCs storage-volume-cthub-spilo-* - * The backup bucket in object storage diff --git a/charts/cthub-spilo/charts/spilo-0.2.0.tgz b/charts/cthub-spilo/charts/spilo-0.2.0.tgz deleted file mode 100644 index a275b1cc..00000000 Binary files a/charts/cthub-spilo/charts/spilo-0.2.0.tgz and /dev/null differ diff --git a/charts/cthub-spilo/templates/_helpers.tpl b/charts/cthub-spilo/templates/_helpers.tpl deleted file mode 100644 index 0107d298..00000000 --- a/charts/cthub-spilo/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "cthub-spilo.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "cthub-spilo.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "cthub-spilo.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "cthub-spilo.labels" -}} -helm.sh/chart: {{ include "cthub-spilo.chart" . }} -{{ include "cthub-spilo.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "cthub-spilo.selectorLabels" -}} -app.kubernetes.io/name: {{ include "cthub-spilo.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "cthub-spilo.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "cthub-spilo.fullname" .) 
.Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/charts/cthub-spilo/values-dev.yaml b/charts/cthub-spilo/values-dev.yaml deleted file mode 100644 index 57b20db8..00000000 --- a/charts/cthub-spilo/values-dev.yaml +++ /dev/null @@ -1,49 +0,0 @@ -spilo: - - replicaCount: 2 - - credentials: - useExistingSecret: true - existingSecret: - name: cthub-patroni-admin - superuserKey: password-superuser - adminKey: password-admin - standbyKey: password-standby - - continuousArchive: - enabled: false - scheduleCronJob: "0 */3 * * *" - retainBackups: 3 - storage: s3 - s3: - bucket: cthubdv - secretName: cthub-db-backup-s3 - - shipLogs: - enabled: false -# s3: -# bucket: s3://cthubdv -# shipSchedule: 0 7 * * * - - persistentVolume: - size: 1Gi - storageClass: netapp-block-standard - - resources: - limits: - cpu: 120m - memory: 400Mi - requests: - cpu: 60m - memory: 200Mi - - podDisruptionBudget: - enabled: false - - probes: - liveness: - enabled: false - readiness: - enabled: true - initialDelaySeconds: 60 - failureThreshold: 20 \ No newline at end of file diff --git a/charts/cthub-spilo/values-prod.yaml b/charts/cthub-spilo/values-prod.yaml deleted file mode 100644 index 3e822b66..00000000 --- a/charts/cthub-spilo/values-prod.yaml +++ /dev/null @@ -1,50 +0,0 @@ -spilo: - - replicaCount: 3 - - credentials: - useExistingSecret: true - existingSecret: - name: itvr-patroni-admin - superuserKey: password-superuser - adminKey: password-admin - standbyKey: password-standby - - continuousArchive: - enabled: true - scheduleCronJob: "0 */3 * * *" - retainBackups: 3 - storage: s3 - s3: - bucket: itvrpr - secretName: itvr-db-backup-s3 - - shipLogs: - enabled: false -# s3: -# bucket: s3://itvrpr -# shipSchedule: 0 7 * * * - - persistentVolume: - size: 5Gi - storageClass: netapp-block-standard - - resources: - limits: - cpu: 120m - memory: 400Mi - requests: - cpu: 60m - memory: 200Mi - - podDisruptionBudget: - enabled: false - - probes: - liveness: - enabled: false - readiness: - enabled: true - initialDelaySeconds: 60 - failureThreshold: 20 - diff --git a/charts/cthub-spilo/values-test.yaml b/charts/cthub-spilo/values-test.yaml deleted file mode 100644 index 19062815..00000000 --- a/charts/cthub-spilo/values-test.yaml +++ /dev/null @@ -1,50 +0,0 @@ -spilo: - - replicaCount: 2 - - credentials: - useExistingSecret: true - existingSecret: - name: tfrs-patroni-admin - superuserKey: password-superuser - adminKey: password-admin - standbyKey: password-standby - - continuousArchive: - enabled: true - scheduleCronJob: "0 */3 * * *" - retainBackups: 3 - storage: s3 - s3: - bucket: tfrsts/postgresbackup - secretName: tfrs-db-backup-s3 - - shipLogs: - enabled: false -# s3: -# bucket: s3://tfrsts -# shipSchedule: 0 7 * * * - - persistentVolume: - size: 2Gi - storageClass: netapp-block-standard - - resources: - limits: - cpu: 120m - memory: 400Mi - requests: - cpu: 60m - memory: 200Mi - - podDisruptionBudget: - enabled: false - - probes: - liveness: - enabled: false - readiness: - enabled: true - initialDelaySeconds: 60 - failureThreshold: 20 - diff --git a/charts/spilo/Chart.yaml b/charts/spilo/Chart.yaml deleted file mode 100644 index c9510193..00000000 --- a/charts/spilo/Chart.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -name: spilo -description: "Highly available elephant herd: HA PostgreSQL cluster." 
-version: 0.2.0 -appVersion: 2.1-p4 -home: https://github.com/bcgov/spilo-chart -sources: - - https://github.com/zalando/spilo - - https://github.com/bcgov/spilo-chart diff --git a/charts/spilo/docs/restore.md b/charts/spilo/docs/restore.md deleted file mode 100644 index 4f806707..00000000 --- a/charts/spilo/docs/restore.md +++ /dev/null @@ -1,87 +0,0 @@ -# Restore - -The process of recovering the spilo postgresql cluster from the continuous archived files stored on S3 storage - -## Process - -Make sure there are no connections to the database cluster. - -### Stop Patroni failover and shutdown Postgresql database - -1. On the leader pod, disable the failover -``` -$ patronictl pause [patroni cluster name] -``` -2. On the secondary pods, stop the postgresql database -``` -$ pg_ctl stop -``` - -3. On the leader pod, stop the postgresql database by running the same command as above - -4. Make sure all pods are stopped and maintenance mode is on -``` -$ patronictl list -``` - -### Retrieve the latest base backup - -Retrieve the latest base backup and all archived WAL files. - -1. On the leader pod, fetch the latest backup from S3 Object Storage -``` -$ envdir /run/etc/wal-e.d/env wal-g backup-fetch /home/postgres/pgdata/pgroot/latest-backup LATEST -It copies the latest base backup base_0000..0037 according to the sample below. -``` -![archived files on S3 storage](./s3.png) - -2. Rename data folder to data-ori and rename latest-backup to data -``` -$ cd /home/postgres/pgdata/pgroot -$ mv data data-ori -$ mv latest-backup data -``` - -3. Download, unzip and copy the files under wal_005 folder to /home/postgres/pgdata/pgroot/allWALs - -4. Copy the files that didn't have a chance to be archived -Compare the files under /home/postgres/pgdata/pgroot/data-ori/pg_wal with the files under the wal_005 folder and copy the additional files to /home/postgres/pgdata/pgroot/allWALs if there are any -``` -According to the sample, the final files in allWALs folder should be -000000010000000000000037.00000028.backup -- from S3 storage -000000010000000000000037 -- from S3 storage -000000010000000000000038 -- from S3 storage -000000010000000000000039 -- from S3 storage -000000010000000000000040 -- from data-ori/pg_wal -``` - -### Restore - -1. Create recovery.signal file -``` -$ touch /home/postgres/pgdata/pgroot/data/recovery.signal -``` - -2. Add the recovery command to postgresql.conf -``` -restore_command = 'cp /home/postgres/pgdata/pgroot/allWALs/%f "%p"' -``` - -3. Start the postgresql on the leader pod -``` -$ pg_ctl start -The file recovery.signal will be removed -``` - -4. Start the postgresql on the secondary pods - -5. Run patronictl list, the maintenance mode is still on - -6. Resume the patroni cluster on the leader pod -``` -$ patronictl resume -``` -Now the patroni cluster is fully restored, log in to the database to verify the latest changes -The restore_command in postgresql.conf is removed automatically. - - diff --git a/charts/spilo/docs/s3.png b/charts/spilo/docs/s3.png deleted file mode 100644 index 97d451be..00000000 Binary files a/charts/spilo/docs/s3.png and /dev/null differ diff --git a/charts/spilo/templates/_helpers.tpl b/charts/spilo/templates/_helpers.tpl deleted file mode 100644 index 5a976e89..00000000 --- a/charts/spilo/templates/_helpers.tpl +++ /dev/null @@ -1,63 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. 
-*/}} -{{- define "spilo.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "spilo.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "spilo.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create the name of the service account to use. -*/}} -{{- define "spilo.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "spilo.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "spilo.labels" -}} -helm.sh/chart: {{ include "spilo.chart" . }} -{{ include "spilo.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "spilo.selectorLabels" -}} -app.kubernetes.io/name: {{ include "spilo.name" . }} -app.kubernetes.io/instance: {{ include "spilo.fullname" . }} -{{- end }} diff --git a/charts/spilo/templates/archive-pvc.yaml b/charts/spilo/templates/archive-pvc.yaml deleted file mode 100644 index 157c74a5..00000000 --- a/charts/spilo/templates/archive-pvc.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if and (.Values.continuousArchive.enabled) (eq .Values.continuousArchive.storage "pvc") }} -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: {{ template "spilo.fullname" . }}-archive - labels: {{ include "spilo.labels" . | nindent 4 }} -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: {{ .Values.continuousArchive.pvc.size }} - storageClassName: {{ .Values.continuousArchive.pvc.storageClass }} -{{- end }} diff --git a/charts/spilo/templates/networkpolicy.yaml b/charts/spilo/templates/networkpolicy.yaml deleted file mode 100644 index c702b058..00000000 --- a/charts/spilo/templates/networkpolicy.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.networkPolicy.enabled }} -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: {{ template "spilo.fullname" . }}-intra-release - labels: {{ include "spilo.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: {{ include "spilo.selectorLabels" . | nindent 6 }} - ingress: - - from: - - podSelector: - matchLabels: {{ include "spilo.selectorLabels" . | nindent 10 }} -{{- end }} diff --git a/charts/spilo/templates/poddisruptionbudget.yaml b/charts/spilo/templates/poddisruptionbudget.yaml deleted file mode 100644 index 4ad65f87..00000000 --- a/charts/spilo/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.podDisruptionBudget.enabled }} -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: {{ template "spilo.fullname" . 
}}-pdb - labels: {{ include "spilo.labels" . | nindent 4 }} -spec: - minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} - maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} - selector: - matchLabels: {{ include "spilo.selectorLabels" . | nindent 6 }} -{{- end }} diff --git a/charts/spilo/templates/role.yaml b/charts/spilo/templates/role.yaml deleted file mode 100644 index bfb3aff2..00000000 --- a/charts/spilo/templates/role.yaml +++ /dev/null @@ -1,44 +0,0 @@ -{{- if .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ template "spilo.fullname" . }} - labels: {{ include "spilo.labels" . | nindent 4 }} -rules: -- apiGroups: [""] - resources: ["configmaps"] - verbs: - - create - - get - - list - - patch - - update - - watch - # delete is required only for 'patronictl remove' - - delete -- apiGroups: [""] - resources: ["services"] - verbs: - - create -- apiGroups: [""] - resources: ["endpoints"] - verbs: - - create - - get - - patch - - update - # the following three privileges are necessary only when using endpoints - - list - - watch - # delete is required only for for 'patronictl remove' - - delete - - deletecollection -- apiGroups: [""] - resources: ["pods"] - verbs: - - get - - list - - patch - - update - - watch -{{- end }} diff --git a/charts/spilo/templates/rolebinding.yaml b/charts/spilo/templates/rolebinding.yaml deleted file mode 100644 index 9b4337e1..00000000 --- a/charts/spilo/templates/rolebinding.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ template "spilo.fullname" . }} - labels: {{ include "spilo.labels" . | nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ template "spilo.serviceAccountName" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "spilo.fullname" . }} -{{- end }} diff --git a/charts/spilo/templates/secret.yaml b/charts/spilo/templates/secret.yaml deleted file mode 100644 index 6f1dffd7..00000000 --- a/charts/spilo/templates/secret.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{- if not .Values.credentials.useExistingSecret }} -{{- $superuserPassword := (randAlphaNum 32) | b64enc | quote }} -{{- $adminPassword := (randAlphaNum 32) | b64enc | quote }} -{{- $standbyPassword := (randAlphaNum 32) | b64enc | quote }} -{{- $secretName := print (include "spilo.fullname" .) }} - -{{- $secret := (lookup "v1" "Secret" .Release.Namespace $secretName ) }} -{{- if $secret }} -{{- $superuserPassword = index $secret.data "password-superuser" }} -{{- $adminPassword = index $secret.data "password-admin" }} -{{- $standbyPassword = index $secret.data "password-standby" }} -{{- end -}} - -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "spilo.fullname" . }} - labels: {{ include "spilo.labels" . 
| nindent 4 }} -type: Opaque -data: - {{- if .Values.credentials.random }} - password-superuser: {{ $superuserPassword }} - password-admin: {{ $adminPassword }} - password-standby: {{ $standbyPassword }} - {{- else }} - password-superuser: {{ .Values.credentials.superuser | b64enc }} - password-admin: {{ .Values.credentials.admin | b64enc }} - password-standby: {{ .Values.credentials.standby | b64enc }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/spilo/templates/serviceaccount.yaml b/charts/spilo/templates/serviceaccount.yaml deleted file mode 100644 index 77d4bf54..00000000 --- a/charts/spilo/templates/serviceaccount.yaml +++ /dev/null @@ -1,7 +0,0 @@ -{{- if .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "spilo.serviceAccountName" . }} - labels: {{ include "spilo.labels" . | nindent 4 }} -{{- end }} diff --git a/charts/spilo/templates/services.yaml b/charts/spilo/templates/services.yaml deleted file mode 100644 index 717fd30d..00000000 --- a/charts/spilo/templates/services.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "spilo.fullname" . }} - labels: {{ include "spilo.labels" . | nindent 4 }} -spec: - type: ClusterIP - ports: - - name: postgresql - port: 5432 - targetPort: postgresql - protocol: TCP - selector: {{ include "spilo.selectorLabels" . | nindent 4 }} - spilo-role: master ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "spilo.fullname" . }}-readonly - labels: {{ include "spilo.labels" . | nindent 4 }} -spec: - type: ClusterIP - ports: - - name: postgresql - port: 5432 - targetPort: postgresql - protocol: TCP - selector: {{ include "spilo.selectorLabels" . | nindent 4 }} - spilo-role: replica diff --git a/charts/spilo/templates/statefulset.yaml b/charts/spilo/templates/statefulset.yaml deleted file mode 100644 index 73f94aea..00000000 --- a/charts/spilo/templates/statefulset.yaml +++ /dev/null @@ -1,251 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "spilo.fullname" . }} - labels: {{ include "spilo.labels" . | nindent 4 }} -spec: - serviceName: {{ template "spilo.fullname" . }} - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: {{ include "spilo.selectorLabels" . | nindent 6 }} - template: - metadata: - name: {{ template "spilo.fullname" . }} - labels: {{ include "spilo.labels" . | nindent 8 }} - spec: - serviceAccountName: {{ template "spilo.serviceAccountName" . }} - terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: PGPASSWORD_SUPERUSER - valueFrom: - secretKeyRef: - {{- if .Values.credentials.useExistingSecret }} - name: {{ .Values.credentials.existingSecret.name }} - key: {{ .Values.credentials.existingSecret.superuserKey }} - {{- else }} - name: {{ template "spilo.fullname" . }} - key: password-superuser - {{- end }} - - name: PGPASSWORD_ADMIN - valueFrom: - secretKeyRef: - {{- if .Values.credentials.useExistingSecret }} - name: {{ .Values.credentials.existingSecret.name }} - key: {{ .Values.credentials.existingSecret.adminKey }} - {{- else }} - name: {{ template "spilo.fullname" . 
}} - key: password-admin - {{- end }} - - name: PGPASSWORD_STANDBY - valueFrom: - secretKeyRef: - {{- if .Values.credentials.useExistingSecret }} - name: {{ .Values.credentials.existingSecret.name }} - key: {{ .Values.credentials.existingSecret.standbyKey }} - {{- else }} - name: {{ template "spilo.fullname" . }} - key: password-standby - {{- end }} - - name: DCS_ENABLE_KUBERNETES_API - value: "true" - - name: KUBERNETES_LABELS - value: {{ (printf "{ \"app.kubernetes.io/instance\": \"%s\"}" (include "spilo.fullname" .)) | quote }} - - name: KUBERNETES_SCOPE_LABEL - value: "app.kubernetes.io/instance" - - name: KUBERNETES_USE_CONFIGMAPS - value: "true" - - name: SCOPE - value: {{ template "spilo.fullname" . }} - - name: PGVERSION - value: {{ .Values.postgresMajorVersion | quote }} - {{- if .Values.continuousArchive.enabled }} - - name: USE_WALG - value: 'true' - - name: USE_WALG_BACKUP - value: 'true' - - name: USE_WALG_RESTORE - value: 'true' - {{- if .Values.continuousArchive.scheduleCronJob }} - - name: BACKUP_SCHEDULE - value: {{ .Values.continuousArchive.scheduleCronJob | quote}} - {{- end }} - {{- if .Values.continuousArchive.retainBackups }} - - name: BACKUP_NUM_TO_RETAIN - value: {{ .Values.continuousArchive.retainBackups | quote}} - {{- end }} - {{- if eq .Values.continuousArchive.storage "s3"}} - - name: WAL_S3_BUCKET - value: {{ .Values.continuousArchive.s3.bucket | quote }} - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: {{ .Values.continuousArchive.s3.secretName }} - key: AWS_ACCESS_KEY_ID - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: {{ .Values.continuousArchive.s3.secretName }} - key: AWS_SECRET_ACCESS_KEY - - name: AWS_ENDPOINT - valueFrom: - secretKeyRef: - name: {{ .Values.continuousArchive.s3.secretName }} - key: AWS_ENDPOINT - {{- end }} - {{- if eq .Values.continuousArchive.storage "gcs"}} - - name: WAL_GS_BUCKET - value: {{ .Values.continuousArchive.gcs.bucket | quote }} - - name: WALE_GS_PREFIX - value: gs://{{ .Values.continuousArchive.gcs.bucket }}/{{ .Values.continuousArchive.gcs.bucketBackupLocation }} - - name: GOOGLE_APPLICATION_CREDENTIALS - value: "/etc/credentials/credentials.json" - {{- end }} - {{- if eq .Values.continuousArchive.storage "pvc"}} - - name: WALG_FILE_PREFIX - value: /home/postgres/archive - {{- end }} - {{- else }} - - name: USE_WALE - value: "" - {{- end }} - {{- if .Values.shipLogs.enabled }} - - name: LOG_S3_BUCKET - value: {{ .Values.shipLogs.s3.bucket | quote }} - - name: LOG_SHIP_SCHEDULE - value: {{ .Values.shipLogs.s3.shipSchedule | quote }} - {{- end }} - - name: PGROOT - value: "{{ .Values.persistentVolume.mountPath }}/pgroot" - - name: POD_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: ALLOW_NOSSL - value: "true" - {{- if .Values.env }} - {{- range $key, $val := .Values.env }} - - name: {{ $key | quote | upper }} - value: {{ $val | quote }} - {{- end }} - {{- end }} - lifecycle: - preStop: - exec: - command: - - /usr/bin/env - - bash - - -c - - | - # switch leader pod if the current pod is the leader - if curl --fail http://localhost:8008/read-write; then - patronictl switchover --force - fi - ports: - - containerPort: 8008 - - containerPort: 5432 - name: postgresql - volumeMounts: - - name: storage-volume - mountPath: "{{ .Values.persistentVolume.mountPath }}" - subPath: "{{ .Values.persistentVolume.subPath }}" - - mountPath: /etc/spilo - name: spilo-config - readOnly: true - {{- if and (.Values.continuousArchive.enabled) (eq 
.Values.continuousArchive.storage "pvc") }} - - mountPath: /home/postgres/archive - name: archive-volume - {{- end }} - {{- if .Values.continuousArchive.enabled }} - {{- if .Values.continuousArchive.gcs.kubernetesSecret }} - - name: archive-gcs-credentials - mountPath: /etc/credentials - readOnly: true - {{- end }} - {{- end }} - resources: -{{ toYaml .Values.resources | indent 10 }} - {{- if .Values.probes.liveness.enabled }} - livenessProbe: - httpGet: - scheme: HTTP - path: /liveness - port: 8008 - initialDelaySeconds: 3 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 3 - {{- end }} - {{- if .Values.probes.readiness.enabled }} - readinessProbe: - httpGet: - scheme: HTTP - path: /readiness - port: 8008 - initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }} - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: {{ .Values.probes.readiness.failureThreshold }} - {{- end }} - affinity: - # Make sure we don't schedule multiple pods on the same node - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - topologyKey: "kubernetes.io/hostname" - labelSelector: - matchLabels: {{ include "spilo.selectorLabels" . | nindent 26 }} - volumes: - - name: spilo-config - secret: - {{- if .Values.credentials.useExistingSecret }} - secretName: {{ .Values.credentials.existingSecret.name }} - {{- else }} - secretName: {{ template "spilo.fullname" . }} - {{- end }} - {{- if .Values.continuousArchive.enabled }} - {{- if .Values.continuousArchive.gcs.kubernetesSecret }} - - name: archive-gcs-credentials - secret: - secretName: {{ .Values.continuousArchive.gcs.kubernetesSecret }} - {{- end }} - {{- end }} - {{- if not .Values.persistentVolume.enabled }} - - name: storage-volume - emptyDir: {} - {{- end }} - {{- if and (.Values.continuousArchive.enabled) (eq .Values.continuousArchive.storage "pvc") }} - - name: archive-volume - persistentVolumeClaim: - claimName: {{ template "spilo.fullname" . }}-archive - {{- end }} - {{- if .Values.persistentVolume.enabled }} - volumeClaimTemplates: - - metadata: - name: storage-volume - annotations: - {{- if .Values.persistentVolume.annotations }} -{{ toYaml .Values.persistentVolume.annotations | indent 8 }} - {{- end }} - labels: {{ include "spilo.labels" . 
| nindent 10 }} - spec: - accessModes: -{{ toYaml .Values.persistentVolume.accessModes | indent 8 }} - resources: - requests: - storage: "{{ .Values.persistentVolume.size }}" - {{- if .Values.persistentVolume.storageClass }} - {{- if (eq "-" .Values.persistentVolume.storageClass) }} - storageClassName: "" - {{- else }} - storageClassName: "{{ .Values.persistentVolume.storageClass }}" - {{- end }} - {{- end }} - {{- end }} diff --git a/charts/spilo/values.yaml b/charts/spilo/values.yaml deleted file mode 100644 index 0bc52c01..00000000 --- a/charts/spilo/values.yaml +++ /dev/null @@ -1,112 +0,0 @@ -replicaCount: 3 - -image: - # Image was built from - # https://github.com/zalando/spilo - repository: registry.opensource.zalan.do/acid/spilo-14 - # defaults to the chart appVersion - tag: ~ - pullPolicy: IfNotPresent - -# Credentials used by Patroni -# https://github.com/zalando/patroni/blob/master/docs/SETTINGS.rst#postgresql -# https://github.com/zalando/spilo/blob/master/ENVIRONMENT.rst -credentials: - useExistingSecret: false - existingSecret: - # Name of the existing secret containing the credentials - name: ~ - # Key of the existing secret containing the superuser (postgres) password - superuserKey: ~ - # Key of the existing secret containing the admin password - adminKey: ~ - # Key of the existing secret containing the standby user password - standbyKey: ~ - - random: true - # The values below allow you to specify passwords for the superuser (postgres), admin, and standby users. - superuser: tea - admin: cola - standby: pinacolada - -# Extra custom environment variables. -# See https://github.com/zalando/spilo/blob/master/ENVIRONMENT.rst -env: {} - -continuousArchive: - # Specifies whether continuous archiving with wal-g should be enabled - enabled: true - # Cron schedule for doing base backups - scheduleCronJob: 00 01 * * * - # Amount of base backups to retain - retainBackups: 15 - - # The storage solution used by wal-g: pvc (default), s3, or gcs are supported by this helm chart - # https://github.com/wal-g/wal-g/blob/master/docs/STORAGES.md - storage: pvc - pvc: - storageClass: netapp-file-backup - size: 500Mi - - # If choose s3 as the storage type, the secret has to be created first. - # It contains three key/value pairs: - # AWS_ACCESS_KEY_ID - # AWS_SECRET_ACCESS_KEY - # AWS_ENDPOINT - s3: - bucket: ~ - secretName: ~ - - gcs: - bucket: ~ - # The GCS bucket folder where the logs will be updated - backupLocation: wal - # Name of the secret that holds the credentials to the bucket - credentialSecretName: ~ - -persistentVolume: - enabled: true - size: 250Mi - storageClass: "netapp-block-standard" - subPath: "" - mountPath: "/home/postgres/pgdata" - annotations: {} - accessModes: - - ReadWriteOnce - -resources: - {} - # If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -rbac: - create: true - -serviceAccount: - name: ~ - -networkPolicy: - enabled: true - -podDisruptionBudget: - enabled: true - minAvailable: ~ - maxUnavailable: 1 - -postgresMajorVersion: 14 - -probes: - liveness: - enabled: false - readiness: - enabled: true - initialDelaySeconds: 60 - failureThreshold: 10 - -terminationGracePeriodSeconds: 30 diff --git a/django/Dockerfile-Openshift b/django/Dockerfile-Openshift new file mode 100644 index 00000000..854d2032 --- /dev/null +++ b/django/Dockerfile-Openshift @@ -0,0 +1,30 @@ +# Use an official Python runtime as a parent image +FROM artifacts.developer.gov.bc.ca/docker-remote/python:3.9-slim + +# Set environment variables +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 + +# Set the working directory in the container +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + build-essential \ + libpq-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install Python dependencies +COPY . /app/ + +RUN pwd && \ + ls -l + +RUN pip install --upgrade pip && \ + pip install --no-cache-dir -r requirements.txt + +# Expose the port the app runs on +EXPOSE 8080 + +# Start the Django server +CMD ["gunicorn", "wsgi", "--config", "gunicorn.cfg.py"] diff --git a/django/api/constants/constants.py b/django/api/constants/constants.py index ffe20391..857f224a 100644 --- a/django/api/constants/constants.py +++ b/django/api/constants/constants.py @@ -12,10 +12,12 @@ from api.models.scrap_it import ScrapIt from api.models.go_electric_rebates import GoElectricRebates from api.models.cvp_data import CVPData +from api.models.ldv_data import LdvData from api.services.spreadsheet_uploader_prep import ( prepare_arc_project_tracking, prepare_hydrogen_fleets, prepare_hydrogen_fueling, + prepare_ldv_data, prepare_ldv_rebates, prepare_public_charging, prepare_scrap_it, @@ -30,7 +32,7 @@ format_postal_codes ) from api.services.resolvers import get_google_resolver -from api.constants.misc import GER_VALID_FIELD_VALUES, ARC_VALID_FIELD_VALUES, LOCALITY_FEATURES_MAP, CVP_DATA_VALID_FIELD_VALUES +from api.constants.misc import GER_VALID_FIELD_VALUES, ARC_VALID_FIELD_VALUES, LOCALITY_FEATURES_MAP, CVP_DATA_VALID_FIELD_VALUES, LDV_DATA_VALID_FIELD_VALUES from enum import Enum @@ -522,6 +524,67 @@ class CVPDataColumnMapping(Enum): notes = "Notes" imhzev = "iMHZEV" +from enum import Enum + +class LDVDataColumns(Enum): + APPLICANT_TYPE = "ApplicantType" + APPLICATION_ID = "ApplicationID" + DEALERSHIP_NAME = "DealershipName" + DATE_SUBMITTED = "DateSubmitted" + PAYMENT_DT = "PaymentDT" + BC_DRIVERS_LICENSE_NO = "BC_DriverLicenseNo" + BC_INC_NO = "BC_IncNo" + ELIGIBLE_REBATE_AMT = "EligibleRebateAmt" + SALE_TYPE = "SaleType" + VIN = "VIN" + YEAR = "Year" + MANUFACTURER = "Manufacturer" + MODEL = "Model" + TRIM = "Trim" + VEHICLE_TYPE = "VehicleType" + VEHICLE_CLASS = "Class" + MSRP = "MSRP" + ELECTRIC_RANGE = "ElectricRange" + VIN_TOKEN = "VIN_Token" + CITY = "City" + POSTAL_CODE = "PostalCode" + BUSINESS_CORP_NAME = "BusinessCorpName" + CAR_SHARE_NAME = "CarShareName" + NON_PROFIT_NAME = "NonProfitName" + MUNICIPALITY_NAME = "MunicipalityName" + DATE_OF_DELIVERY = "DateOfDelivery" + LEASE_TERM = "LeaseTerm" + + +class LDVDataColumnMapping(Enum): + applicant_type = "ApplicantType" + application_id = "ApplicationID" + dealership_name = "DealershipName" + date_submitted = "DateSubmitted" + payment_dt = "PaymentDT" + bc_drivers_license_no = "BC_DriverLicenseNo" + 
bc_inc_no = "BC_IncNo" + eligible_rebate_amt = "EligibleRebateAmt" + sale_type = "SaleType" + vin = "VIN" + year = "Year" + manufacturer = "Manufacturer" + model = "Model" + trim = "Trim" + vehicle_type = "VehicleType" + vehicle_class = "Class" + msrp = "MSRP" + electric_range = "ElectricRange" + vin_token = "VIN_Token" + city = "City" + postal_code = "PostalCode" + business_corp_name = "BusinessCorpName" + car_share_name = "CarShareName" + non_profit_name = "NonProfitName" + municipality_name = "MunicipalityName" + date_of_delivery = "DateOfDelivery" + lease_term = "LeaseTerm" + FIELD_TYPES = { "ARC Project Tracking": { @@ -753,7 +816,35 @@ class CVPDataColumnMapping(Enum): "notes": str, "imhzev": str, }, - + "LDV Data": { + "applicant_type": str, + "application_id": str, + "dealership_name": str, + "date_submitted": datetime.date, + "payment_dt": datetime.date, + "bc_drivers_license_no": str, + "bc_inc_no": str, + "eligible_rebate_amt": int, + "sale_type": str, + "vin": str, + "year": int, + "manufacturer": str, + "model": str, + "trim": str, + "vehicle_type": str, + "vehicle_class": str, + "msrp": int, + "electric_range": int, + "vin_token": str, + "city": str, + "postal_code": str, + "business_corp_name": str, + "car_share_name": str, + "non_profit_name": str, + "municipality_name": str, + "date_of_delivery": datetime.date, + "lease_term": str, + } } @@ -830,7 +921,7 @@ class CVPDataColumnMapping(Enum): {"function": location_checker, "columns": ["City"], "kwargs": {"columns_to_features_map": {"City": LOCALITY_FEATURES_MAP}, "indices_offset":2}}, {"function": email_validator, "columns": ["Email"], "kwargs": {"indices_offset":2, "get_resolver": get_google_resolver}}, {"function": validate_field_values, "columns": [], "kwargs": {"indices_offset":2, "fields_and_values": GER_VALID_FIELD_VALUES}}, - {"function": format_postal_codes, "columns": ["Postal code"], "kwargs": {"indices_offset":2, "validate": True}} + {"function": format_postal_codes, "columns": ["Postal code"], "kwargs": {"indices_offset":2, "validate": True, "allow_empty": True}} ] }, "CVP Data": { @@ -841,5 +932,16 @@ class CVPDataColumnMapping(Enum): "preparation_functions": [prepare_cvp_data], "validation_functions": [{"function": validate_field_values, "columns": [], "kwargs": {"indices_offset":2, "fields_and_values": CVP_DATA_VALID_FIELD_VALUES, "delimiter": ","}},] }, + "LDV Data": { + "model": LdvData, + "columns": LDVDataColumns, + "column_mapping": LDVDataColumnMapping, + "sheet_name": "SP-Complete sheet", + "preparation_functions": [prepare_ldv_data], + "validation_functions": [ + {"function": validate_field_values, "columns": [], "kwargs": {"indices_offset":2, "fields_and_values": LDV_DATA_VALID_FIELD_VALUES, "delimiter": ","}}, + {"function": format_postal_codes, "columns": ["PostalCode"], "kwargs": {"indices_offset":2, "validate": True}} + ] + }, } diff --git a/django/api/constants/decoder.py b/django/api/constants/decoder.py index 8915e498..695e2b1e 100644 --- a/django/api/constants/decoder.py +++ b/django/api/constants/decoder.py @@ -29,3 +29,72 @@ def get_service(service_name): if service.NAME.value == service_name: return service return None + + +class ICBC_FILE(Enum): + DELIMITER = "|" + NA_VALUES = ["NIL", "Unknown", "unknown", "UNKNOWN"] + DATA_TYPES = { + "model": str, + "vin_error_code": str, + "rate_class_group": str, + "hybrid_vehicle_flag": str, + "postal_code": str, + "make": str, + "change": str, + "vin": str, + "fuel_type": str, + "use_category": str, + "policy_type": str, + "fleet_flag": str, + 
"city": str, + "owner_giver_relationship": str, + "personal_or_commercial": str, + "electric_vehicle_flag": str, + "body_style": str, + "vin_error_code_description": str, + "vehicle_type": str, + } + COLUMNS_TO_DROP = [ + "ytd_policy_years_earned", + ] + NUMERIC_COLUMNS = [ + "rate_class", + "model_year", + "net_weight", + "motorcycle_displacement_size", + "fleet_number_of_vehicles", + "vehicle_registration_number", + "odometer_reading", + "licenced_gross_vehicle_weight", + "fleet_identifier", + ] + DATE_COLUMNS = [ + "snapshot_date", + "change_date", + "vehicle_purchase_date", + "vehicle_registration_date", + ] + MODIFICATION_MAP = { + "upper": [ + "vin", + "make", + "model", + "postal_code", + "electric_vehicle_flag", + "fleet_flag", + "hybrid_vehicle_flag", + ], + "lower": [ + "body_style", + "fuel_type", + "owner_giver_relationship", + "personal_or_commercial", + "policy_type", + "rate_class_group", + "vehicle_type", + "vin_error_code", + "vin_error_code_description", + ], + "title": ["city", "use_category"], + } diff --git a/django/api/constants/misc.py b/django/api/constants/misc.py index 5ede204a..27c20136 100644 --- a/django/api/constants/misc.py +++ b/django/api/constants/misc.py @@ -107,3 +107,12 @@ 'Procurement', 'New Design', 'Hybrid Retrofit', 'BEV Retrofit', 'H2 Retrofit' ] } + +LDV_DATA_VALID_FIELD_VALUES = { + 'ApplicantType': ['Individual', 'Business / Corporation', 'Municipality / School District', 'Car Share Fleet', 'Non Profit Organization'], + 'EligibleRebateAmt': ['1000', '2000', '1500', '500', '666', '1334', '4000', '3000', '334', '2668', '667', '5000', '2500', '1668', '6000', '833', '1665', '3335', '1001', '2001', '1667'], + 'SaleType': ['Purchase', 'Lease', 'lease_agreement', 'oem_order_form'], + 'VehicleType': ['BEV', 'ER-EV', 'FCEV', 'PHEV'], + 'Class': ['0', 'Car', 'Larger Vehicle'], + 'LeaseTerm': ['12 Mos', '24', '24 Mos', '36+ Mos'] +} diff --git a/django/api/migrations/0042_ldvdata.py b/django/api/migrations/0042_ldvdata.py new file mode 100644 index 00000000..54502717 --- /dev/null +++ b/django/api/migrations/0042_ldvdata.py @@ -0,0 +1,53 @@ +# Generated by Django 3.2.25 on 2025-03-17 18:11 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0041_add_datasets_and_file_requirements'), + ] + + operations = [ + migrations.CreateModel( + name='LdvData', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('applicant_type', models.CharField(max_length=100)), + ('application_id', models.CharField(max_length=100, unique=True)), + ('dealership_name', models.CharField(max_length=100)), + ('date_submitted', models.DateField()), + ('payment_dt', models.DateField()), + ('bc_drivers_license_no', models.CharField(max_length=100, unique=True)), + ('bc_inc_no', models.CharField(max_length=100, unique=True)), + ('eligible_rebate_amt', models.IntegerField()), + ('sale_type', models.CharField(blank=True, max_length=100, null=True)), + ('vin', models.CharField(max_length=17, unique=True)), + ('year', models.IntegerField()), + ('manufacturer', models.CharField(max_length=100)), + ('model', models.CharField(blank=True, max_length=100, null=True)), + ('trim', 
models.CharField(blank=True, max_length=100, null=True)), + ('vehicle_type', models.CharField(blank=True, max_length=50, null=True)), + ('vehicle_class', models.CharField(blank=True, max_length=50, null=True)), + ('msrp', models.IntegerField(blank=True, null=True)), + ('electric_range', models.IntegerField(blank=True, null=True)), + ('vin_token', models.CharField(blank=True, max_length=9, null=True)), + ('city', models.CharField(blank=True, max_length=100, null=True)), + ('postal_code', models.CharField(blank=True, max_length=10, null=True)), + ('business_corp_name', models.CharField(blank=True, max_length=200, null=True)), + ('car_share_name', models.CharField(blank=True, max_length=200, null=True)), + ('non_profit_name', models.CharField(blank=True, max_length=200, null=True)), + ('municipality_name', models.CharField(blank=True, max_length=200, null=True)), + ('date_of_delivery', models.DateField(blank=True, null=True)), + ('lease_term', models.CharField(blank=True, max_length=50, null=True)), + ], + options={ + 'db_table': 'ldv_data', + }, + ), + ] diff --git a/django/api/migrations/0043_alter_ldvdata_bc_drivers_license_no.py b/django/api/migrations/0043_alter_ldvdata_bc_drivers_license_no.py new file mode 100644 index 00000000..972f3e67 --- /dev/null +++ b/django/api/migrations/0043_alter_ldvdata_bc_drivers_license_no.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.25 on 2025-03-18 18:32 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0042_ldvdata'), + ] + + operations = [ + migrations.AlterField( + model_name='ldvdata', + name='bc_drivers_license_no', + field=models.CharField(max_length=100), + ), + ] diff --git a/django/api/migrations/0044_auto_20250318_1834.py b/django/api/migrations/0044_auto_20250318_1834.py new file mode 100644 index 00000000..3dd3f616 --- /dev/null +++ b/django/api/migrations/0044_auto_20250318_1834.py @@ -0,0 +1,25 @@ +# Generated by Django 3.2.25 on 2025-03-18 18:34 + +from django.db import migrations, models + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0043_alter_ldvdata_bc_drivers_license_no'), + ] + + operations = [ + migrations.AlterField( + model_name='ldvdata', + name='bc_inc_no', + field=models.CharField(max_length=100), + ), + migrations.AlterField( + model_name='ldvdata', + name='vin', + field=models.CharField(max_length=17), + ), + migrations.RunSQL( + "INSERT INTO datasets (create_user, update_user, name) VALUES ('SYSTEM', 'SYSTEM', 'LDV Data');" + ), + ] diff --git a/django/api/migrations/0045_auto_20250409_2041.py b/django/api/migrations/0045_auto_20250409_2041.py new file mode 100644 index 00000000..cd097777 --- /dev/null +++ b/django/api/migrations/0045_auto_20250409_2041.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.25 on 2025-04-09 20:41 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0044_auto_20250318_1834'), + ] + + operations = [ + migrations.AlterField( + model_name='ldvdata', + name='bc_drivers_license_no', + field=models.CharField(blank=True, max_length=100, null=True), + ), + migrations.AlterField( + model_name='ldvdata', + name='bc_inc_no', + field=models.CharField(blank=True, max_length=100, null=True), + ), + ] diff --git a/django/api/migrations/0046_auto_20250903_1745.py b/django/api/migrations/0046_auto_20250903_1745.py new file mode 100644 index 00000000..fb8cda50 --- /dev/null +++ b/django/api/migrations/0046_auto_20250903_1745.py @@ -0,0 +1,45 @@ +# 
Generated by Django 3.2.25 on 2025-09-03 17:45 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0045_auto_20250409_2041'), + ] + + operations = [ + migrations.RemoveConstraint( + model_name='uploadedvinrecord', + name='unique_vin_postal_code', + ), + migrations.RenameField( + model_name='uploadedvinsfile', + old_name='chunk_size', + new_name='chunksize', + ), + migrations.RemoveField( + model_name='uploadedvinrecord', + name='postal_code', + ), + migrations.RemoveField( + model_name='uploadedvinrecord', + name='timestamp', + ), + migrations.AddField( + model_name='uploadedvinrecord', + name='change', + field=models.CharField(choices=[('created', 'Created'), ('modified', 'Modified'), ('removed', 'Removed')], default='created', max_length=64), + ), + migrations.AddField( + model_name='uploadedvinsfile', + name='chunks_per_iteration', + field=models.IntegerField(default=100), + ), + migrations.AlterField( + model_name='uploadedvinrecord', + name='vin', + field=models.CharField(max_length=17, unique=True), + ), + ] diff --git a/django/api/models/ldv_data.py b/django/api/models/ldv_data.py new file mode 100644 index 00000000..0080d3f1 --- /dev/null +++ b/django/api/models/ldv_data.py @@ -0,0 +1,115 @@ +from auditable.models import Auditable +from django.db import models + +class LdvData(Auditable): + + applicant_type = models.CharField( + max_length=100, unique=False, blank=False, null=False + ) + + application_id = models.CharField( + max_length=100, unique=True, blank=False, null=False + ) + + dealership_name = models.CharField( + max_length=100, unique=False, blank=False, null=False + ) + + date_submitted = models.DateField( + blank=False, null=False + ) + + payment_dt = models.DateField( + blank=False, null=False + ) + + bc_drivers_license_no = models.CharField( + max_length=100, blank=True, null=True + ) + + bc_inc_no = models.CharField( + max_length=100, blank=True, null=True + ) + + eligible_rebate_amt = models.IntegerField( + blank=False, null=False + ) + + sale_type = models.CharField( + max_length=100, unique=False, blank=True, null=True + ) + + vin = models.CharField( + max_length=17, blank=False, null=False + ) + + year = models.IntegerField( + unique=False, blank=False, null=False + ) + + manufacturer = models.CharField( + max_length=100, unique=False, blank=False, null=False + ) + + model = models.CharField( + max_length=100, unique=False, blank=True, null=True + ) + + trim = models.CharField( + max_length=100, unique=False, blank=True, null=True + ) + + vehicle_type = models.CharField( + max_length=50, unique=False, blank=True, null=True + ) + + vehicle_class = models.CharField( + max_length=50, unique=False, blank=True, null=True + ) + + msrp = models.IntegerField( + unique=False, blank=True, null=True + ) + + electric_range = models.IntegerField( + unique=False, blank=True, null=True + ) + + vin_token = models.CharField( + max_length=9, unique=False, blank=True, null=True + ) + + city = models.CharField( + max_length=100, unique=False, blank=True, null=True + ) + + postal_code = models.CharField( + max_length=10, unique=False, blank=True, null=True + ) + + business_corp_name = models.CharField( + max_length=200, unique=False, blank=True, null=True + ) + + car_share_name = models.CharField( + max_length=200, unique=False, blank=True, null=True + ) + + non_profit_name = models.CharField( + max_length=200, unique=False, blank=True, null=True + ) + + municipality_name = models.CharField( + max_length=200, 
unique=False, blank=True, null=True + ) + + date_of_delivery = models.DateField( + blank=True, null=True + ) + + lease_term = models.CharField( + max_length=50, unique=False, blank=True, null=True + ) + + class Meta: + db_table = "ldv_data" diff --git a/django/api/models/uploaded_vin_record.py b/django/api/models/uploaded_vin_record.py index 1d6d306b..c13cbbde 100644 --- a/django/api/models/uploaded_vin_record.py +++ b/django/api/models/uploaded_vin_record.py @@ -1,13 +1,21 @@ from django.db import models from auditable.models import Auditable +from django.utils.translation import gettext_lazy as _ class UploadedVinRecord(Auditable): - vin = models.CharField(max_length=17) + vin = models.CharField(max_length=17, unique=True) - postal_code = models.CharField(max_length=7) + class Change(models.TextChoices): + CREATED = ("created", _("Created")) + MODIFIED = ("modified", _("Modified")) + REMOVED = ("removed", _("Removed")) - timestamp = models.DateTimeField() + change = models.CharField( + max_length=64, + default=Change.CREATED, + choices=Change.choices, + ) data = models.JSONField() @@ -21,10 +29,5 @@ class UploadedVinRecord(Auditable): class Meta: db_table = "uploaded_vin_record" - constraints = [ - models.UniqueConstraint( - fields=["vin", "postal_code"], name="unique_vin_postal_code" - ) - ] db_table_comment = "represents an uploaded VIN, and associated information" diff --git a/django/api/models/uploaded_vins_file.py b/django/api/models/uploaded_vins_file.py index 853397f7..f8214e34 100644 --- a/django/api/models/uploaded_vins_file.py +++ b/django/api/models/uploaded_vins_file.py @@ -5,10 +5,12 @@ class UploadedVinsFile(Auditable): filename = models.CharField(max_length=32, unique=True) - chunk_size = models.IntegerField(default=5000) + chunksize = models.IntegerField(default=5000) start_index = models.IntegerField(default=0) + chunks_per_iteration = models.IntegerField(default=100) + processed = models.BooleanField(default=False) class Meta: diff --git a/django/api/services/datasheet_template_generator.py b/django/api/services/datasheet_template_generator.py index 3949ddb3..3c4082d4 100644 --- a/django/api/services/datasheet_template_generator.py +++ b/django/api/services/datasheet_template_generator.py @@ -13,10 +13,11 @@ def generate_template(dataset_name): "Data Fleets": DataFleetsColumns, "Hydrogen Fleets": HydrogenFleetsColumns, "Hydrogen Fueling": HydrogenFuelingColumns, - "LDV Rebates": LDVRebatesColumns, + "LDV Data": LDVDataColumns, "Public Charging": PublicChargingColumns, "Scrap It": ScrapItColumns, "Go Electric Rebates Program": GoElectricRebatesColumns, + "CVP Data": CVPDataColumns } if dataset_name not in dataset_column_enum_map: diff --git a/django/api/services/decoded_vin_record.py b/django/api/services/decoded_vin_record.py index 5ec6108e..523f7018 100644 --- a/django/api/services/decoded_vin_record.py +++ b/django/api/services/decoded_vin_record.py @@ -5,11 +5,9 @@ get_number_of_decode_attempts, set_number_of_decode_attempts, ) -from django.db import transaction from django.utils import timezone -@transaction.atomic def save_decoded_data( uploaded_vin_records, vins_to_insert, diff --git a/django/api/services/spreadsheet_uploader_prep.py b/django/api/services/spreadsheet_uploader_prep.py index f17fce80..03947986 100644 --- a/django/api/services/spreadsheet_uploader_prep.py +++ b/django/api/services/spreadsheet_uploader_prep.py @@ -101,6 +101,14 @@ def prepare_go_electric_rebates(df): adjust_ger_manufacturer_names(df) return df +def prepare_ldv_data(df): + + df = 
df.applymap(lambda s: s.upper() if type(s) == str else s) + make_names_consistent(df) + make_prepositions_consistent(df) + + return df + def prepare_cvp_data(df): df = df.applymap(lambda s: s.upper() if type(s) == str else s) df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in "biufc" else x.fillna("")) @@ -158,29 +166,36 @@ def make_names_consistent(df): {', Inc.': ' Inc.', '(?i)\\bdba\\b': 'DBA'} # Matches word "dba" regardless of case ) - df[['Applicant Name', 'Manufacturer']] = df[['Applicant Name', 'Manufacturer']].replace( - consistent_name_dict, - regex=True) + existing_columns = {col.lower(): col for col in df.columns} + columns_to_process = [existing_columns[key] for key in ['applicant name', 'manufacturer'] if key in existing_columns] + + if columns_to_process: + df[columns_to_process] = df[columns_to_process].replace(consistent_name_dict, regex=True) def make_prepositions_consistent(df): - df[['Applicant Name', 'Manufacturer']] = df[['Applicant Name', 'Manufacturer']].replace( - dict.fromkeys( - ['(?i)\\bbc(?=\\W)', # Matches word "bc" regardless of case - '(?i)\\bb\\.c\\.(?=\\W)'], 'BC'), # Matches word "b.c." regardless of case - regex=True - ).replace( - {'BC Ltd.': 'B.C. Ltd.', - '\\bOf(?=\\W)': 'of', - '\\bAnd(?=\\W)': 'and', # Matches word "And" - '\\bThe(?=\\W)': 'the', - '\\bA(?=\\W)': 'a', - '\\bAn(?=\\W)': 'an'}, - regex=True - ) - ##The first letter should be capitalized - df[['Applicant Name', 'Manufacturer']] = df[['Applicant Name', 'Manufacturer']].applymap( - lambda x: x[0].upper() + x[1:] if isinstance(x, str) and len(x) > 1 else x.upper() if isinstance(x, str) and len(x) == 1 else x -) + existing_columns = {col.lower(): col for col in df.columns} + columns_to_process = [existing_columns[key] for key in ['applicant name', 'manufacturer'] if key in existing_columns] + + if columns_to_process: + df[columns_to_process] = df[columns_to_process].replace( + dict.fromkeys( + ['(?i)\\bbc(?=\\W)', # Matches word "bc" regardless of case + '(?i)\\bb\\.c\\.(?=\\W)'], 'BC' # Matches word "b.c." regardless of case + ), + regex=True + ).replace( + {'BC Ltd.': 'B.C. 
Ltd.', + '\\bOf(?=\\W)': 'of', + '\\bAnd(?=\\W)': 'and', + '\\bThe(?=\\W)': 'the', + '\\bA(?=\\W)': 'a', + '\\bAn(?=\\W)': 'an'}, + regex=True + ) + + df[columns_to_process] = df[columns_to_process].applymap( + lambda x: x[0].upper() + x[1:] if isinstance(x, str) and len(x) > 1 else x.upper() if isinstance(x, str) and len(x) == 1 else x + ) def adjust_ger_manufacturer_names(df): @@ -355,13 +370,13 @@ def email_validator(df, *columns, **kwargs): def validate_field_values(df, *columns, **kwargs): allowed_values = kwargs.get("fields_and_values") - invalid_values = [] result = {} delimiter = kwargs.get("delimiter") for column in df.columns: if column in allowed_values: indices = [] + invalid_values = [] series = df[column] for index, value in series.items(): if value is not None and pd.notna(value): @@ -414,6 +429,7 @@ def region_checker(df, *columns, **kwargs): def format_postal_codes(df, *columns, **kwargs): validate = kwargs.get('validate', False) + allow_empty = kwargs.get('allow_empty', False) indices_offset = kwargs.get("indices_offset", 0) result = {} @@ -425,13 +441,12 @@ def format_postal_codes(df, *columns, **kwargs): for value, indices in map_of_values_to_indices.items(): clean_value = value.replace(" ", "") if isinstance(value, str) else "" - - if len(clean_value) == 6: + if len(clean_value) == 6 or (allow_empty and len(clean_value) == 0): formatted_value = clean_value[:3] + " " + clean_value[3:] for index in indices: df.at[index - indices_offset, column] = formatted_value elif validate: - if pd.isna(value) or value == "": + if (pd.isna(value) or value == "") and not allow_empty: value = "Empty" invalid_groups.append({ "invalid_postal_code": value, diff --git a/django/api/services/uploaded_vin_record.py b/django/api/services/uploaded_vin_record.py index 711ad58e..d1dc46e7 100644 --- a/django/api/services/uploaded_vin_record.py +++ b/django/api/services/uploaded_vin_record.py @@ -1,24 +1,31 @@ -from datetime import datetime import pandas as pd from django.utils import timezone from api.models.uploaded_vin_record import UploadedVinRecord -from api.constants.decoder import get_service +from api.constants.decoder import get_service, ICBC_FILE def parse_and_save(uploaded_vins_file, file_response): - processed = True - start_index = uploaded_vins_file.start_index chunks = pd.read_csv( - file_response, sep="|", chunksize=uploaded_vins_file.chunk_size + file_response, + chunksize=uploaded_vins_file.chunksize, + sep=ICBC_FILE.DELIMITER.value, + na_values=ICBC_FILE.NA_VALUES.value, + dtype=ICBC_FILE.DATA_TYPES.value, ) - + start_index = uploaded_vins_file.start_index + end_index = start_index + uploaded_vins_file.chunks_per_iteration + processed = True for idx, df in enumerate(chunks): - if idx == start_index: + if idx < start_index: + continue + elif idx >= start_index and idx < end_index: + preprocess_chunk(df) df.fillna("", inplace=True) vins = [] for _, row in df.iterrows(): - if row["vin"] != "": - vins.append(row["vin"]) + vin = row["vin"] + if len(vin) == 17: + vins.append(vin) df_records_map = get_df_records_map(df) existing_records_map = get_existing_records_map(vins) records_to_insert = get_records_to_insert( @@ -29,89 +36,129 @@ def parse_and_save(uploaded_vins_file, file_response): df_records_map, existing_records_map ) UploadedVinRecord.objects.bulk_update( - records_to_update, ["data", "timestamp", "update_timestamp"] + records_to_update, ["data", "change", "update_timestamp"] ) - elif idx > start_index: + else: processed = False break - + if processed: + 
UploadedVinRecord.objects.exclude( + update_timestamp__gte=uploaded_vins_file.create_timestamp + ).update( + change=UploadedVinRecord.Change.REMOVED, update_timestamp=timezone.now() + ) uploaded_vins_file.processed = processed - uploaded_vins_file.start_index = start_index + 1 + uploaded_vins_file.start_index = end_index uploaded_vins_file.save() -# returns a dict of (vin, postal_code) -> {timestamp, data}
+def format_case(s, case="skip"): + # Apply string methods to non-NaN values only; otherwise NaN is converted to the string "nan". + if len(s.dropna()) == 0: + return s + output = ( + s[s.notna()] + .astype(str) # Convert to string + .str.strip() # Strip stray whitespace (extra tabs, newlines, etc. in this dataset) + ) + if case == "title": + return output.str.title() + elif case == "upper": + return output.str.upper() + elif case == "lower": + return output.str.lower() + return output # "skip": whitespace is stripped but the case is left unchanged + +
+def format_numbers(s): + # Apply string methods to non-NaN values only; otherwise NaN is converted to the string "nan". + if len(s.dropna()) == 0: + return s + return pd.to_numeric( + s[s.notna()] + .astype(str) # Convert to string + .str.strip() # Strip stray whitespace (extra tabs, newlines, etc. in this dataset) + .str.replace(",", "") # Drop thousands separators + .str.replace(" ", "") + ) + +
+def preprocess_chunk(df): + df.columns = df.columns.str.lower() + df.drop(columns=ICBC_FILE.COLUMNS_TO_DROP.value, inplace=True) + numeric_cols = list( + set(ICBC_FILE.NUMERIC_COLUMNS.value).intersection(set(df.columns)) + ) + numeric_cols_w_strings = df[numeric_cols].select_dtypes("object").columns + for col in numeric_cols_w_strings: + df[col] = format_numbers(df[col]) + date_cols = list(set(ICBC_FILE.DATE_COLUMNS.value).intersection(set(df.columns))) + for col in date_cols: + s = (pd.to_datetime(df[col], yearfirst=True, utc=True).dt.date).astype(str) + df[col] = s.where(s != "NaT") + for key, cols in ICBC_FILE.MODIFICATION_MAP.value.items(): + col_subset = list(set(cols).intersection(df.columns)) + if len(col_subset) != 0: + for col in col_subset: + df[col] = format_case(df[col], case=key) + +
+# returns a dict of vin -> data def get_df_records_map(df): result = {} for _, row in df.iterrows(): vin = row["vin"] - postal_code = row["postal_code"] - df_timestamp = row["snapshot_date"] - if vin and postal_code and df_timestamp: - key = (vin, postal_code) - timestamp = timezone.make_aware( - datetime.strptime(df_timestamp, "%Y-%m-%d %H:%M:%S.%f") - ) - df_data = row.to_dict() - data = df_data if df_data else {} + if len(vin) == 17: + data = row.to_dict() del data["vin"] - del data["postal_code"] - del data["snapshot_date"] - if key in result: - most_recent_ts = result[key]["timestamp"] - if most_recent_ts < timestamp: - result[key] = {"timestamp": timestamp, "data": data} - else: - result[key] = {"timestamp": timestamp, "data": data} + result[vin] = data return result -# returns a dict of (vin, postal_code) -> {id, timestamp} +# returns a dict of vin -> id def get_existing_records_map(vins): result = {} - records = UploadedVinRecord.objects.only( - "id", "vin", "postal_code", "timestamp" - ).filter(vin__in=vins) + records = UploadedVinRecord.objects.only("id", "vin").filter(vin__in=vins) for record in records: - key = (record.vin, record.postal_code) - result[key] = {"id": record.id, "timestamp": record.timestamp} + result[record.vin] = record.id return result -# df_records_map should be dict of (vin, postal_code) -> 
{timestamp, data} -# existing_records_map should be dict of (vin, postal_code) -> {id, timestamp} +# df_records_map should be dict of vin -> data +# existing_records_map should be dict of vin -> id def get_records_to_insert(df_records_map, existing_records_map): result = [] - for key, value in df_records_map.items(): - if key not in existing_records_map: + for vin, data in df_records_map.items(): + if vin not in existing_records_map: result.append( UploadedVinRecord( - vin=key[0], - postal_code=key[1], - timestamp=value["timestamp"], - data=value["data"], + vin=vin, + data=data, ) ) return result -# df_records_map should be dict of (vin, postal_code) -> {timestamp, data} -# existing_records_map should be dict of (vin, postal_code) -> {id, timestamp} +# df_records_map should be dict of vin -> data +# existing_records_map should be dict of vin -> id +# assumes that there are no duplicate vins in the same file def get_records_to_update(df_records_map, existing_records_map): result = [] - for key, value in df_records_map.items(): - if key in existing_records_map: - existing_record = existing_records_map[key] - timestamp = value["timestamp"] - if existing_record["timestamp"] < timestamp: - result.append( - UploadedVinRecord( - id=existing_record["id"], - timestamp=timestamp, - data=value["data"], - update_timestamp=timezone.now(), - ) + for vin, data in df_records_map.items(): + if vin in existing_records_map: + existing_record_id = existing_records_map[vin] + result.append( + UploadedVinRecord( + id=existing_record_id, + data=data, + update_timestamp=timezone.now(), + change=UploadedVinRecord.Change.MODIFIED, ) + ) return result diff --git a/django/api/settings.py b/django/api/settings.py index c73db0c8..8bb91937 100644 --- a/django/api/settings.py +++ b/django/api/settings.py @@ -202,6 +202,7 @@ "orm": "default", "save_limit": -1, "max_attempts": 100, + "max_rss": 300000, } MAX_DECODE_ATTEMPTS = os.getenv("MAX_DECODE_ATTEMPTS", 5) diff --git a/django/api/viewsets/upload.py b/django/api/viewsets/upload.py index cd0243e7..23385dcf 100644 --- a/django/api/viewsets/upload.py +++ b/django/api/viewsets/upload.py @@ -1,19 +1,16 @@ -import urllib.request -import os from rest_framework import status from rest_framework.decorators import action from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.viewsets import GenericViewSet -from django.http import JsonResponse, FileResponse +from django.http import JsonResponse import pathlib from django.http import HttpResponse -from django.core.exceptions import ValidationError from django.utils.decorators import method_decorator from api.decorators.permission import check_upload_permission from api.models.datasets import Datasets from api.serializers.datasets import DatasetsSerializer -from api.services.minio import generate_presigned_url, minio_get_object, minio_remove_object +from api.services.minio import generate_presigned_url, get_minio_object, minio_remove_object from api.services.datasheet_template_generator import generate_template from api.services.spreadsheet_uploader import import_from_xls import api.constants.constants as constants @@ -70,9 +67,7 @@ def import_data(self, request): else: return Response({"success": False, "message": "File must be a csv."}, status=status.HTTP_400_BAD_REQUEST) try: - url = minio_get_object(filename) - urllib.request.urlretrieve(url, filename) - + file_data = get_minio_object(filename).data config = constants.DATASET_CONFIG.get(dataset_selected) if not config: 
return Response( @@ -90,7 +85,7 @@ def import_data(self, request): header_row = config.get("header_row", 0) result = import_from_xls( - excel_file=filename, + excel_file=file_data, sheet_name=sheet_name, model=model, header_row=header_row, @@ -115,7 +110,6 @@ def import_data(self, request): ) finally: - os.remove(filename) minio_remove_object(filename) @action(detail=False, methods=["get"]) diff --git a/django/workers/apps.py b/django/workers/apps.py index ae04b136..39011e32 100644 --- a/django/workers/apps.py +++ b/django/workers/apps.py @@ -6,6 +6,7 @@ class Config(AppConfig): name = "workers" def ready(self): + import workers.signal_receivers from workers.scheduled_jobs import ( schedule_create_minio_bucket, schedule_read_uploaded_vins_file, diff --git a/django/workers/decorators/tasks.py b/django/workers/decorators/tasks.py deleted file mode 100644 index ccba8e6c..00000000 --- a/django/workers/decorators/tasks.py +++ /dev/null @@ -1,31 +0,0 @@ -import ctypes -import threading - - -class TaskTimeoutException(Exception): - pass - - -def timeout(time): - def wrapper(func): - def wrapped(*args, **kwargs): - current_thread_id = threading.current_thread().ident - - def throw_timeout(): - ctypes.pythonapi.PyThreadState_SetAsyncExc( - ctypes.c_ulong(current_thread_id), - ctypes.py_object(TaskTimeoutException), - ) - - t = threading.Timer(time, throw_timeout) - t.start() - try: - func(*args, **kwargs) - t.cancel() - except Exception as ex: - t.cancel() - raise ex - - return wrapped - - return wrapper diff --git a/django/workers/scheduled_jobs.py b/django/workers/scheduled_jobs.py index f2b8dbc1..0b5fd5c4 100644 --- a/django/workers/scheduled_jobs.py +++ b/django/workers/scheduled_jobs.py @@ -20,8 +20,8 @@ def schedule_read_uploaded_vins_file(): "workers.tasks.read_uploaded_vins_file", name="read_uploaded_vins_file", schedule_type="C", - cron="*/3 * * * *", - q_options={"timeout": 165, "ack_failure": True}, + cron="*/6 * * * *", + q_options={"timeout": 300, "ack_failure": True}, ) except IntegrityError: pass diff --git a/django/workers/signal_receivers.py b/django/workers/signal_receivers.py new file mode 100644 index 00000000..3eb9e93e --- /dev/null +++ b/django/workers/signal_receivers.py @@ -0,0 +1,12 @@ +from django.dispatch import receiver +from django_q.signals import pre_execute +from django_q.brokers import get_broker + + +@receiver(pre_execute) +def ack(sender, task, **kwargs): + if task.get("ack_failure", False): + ack_id = task.pop("ack_id", None) + if ack_id: + broker = get_broker() + broker.acknowledge(ack_id) diff --git a/django/workers/tasks.py b/django/workers/tasks.py index e236ded9..ecef80cc 100644 --- a/django/workers/tasks.py +++ b/django/workers/tasks.py @@ -7,7 +7,6 @@ from api.services.decoded_vin_record import save_decoded_data from api.services.uploaded_vin_record import parse_and_save from django.db import transaction -from workers.decorators.tasks import timeout def create_minio_bucket(): @@ -18,8 +17,6 @@ def create_minio_bucket(): client.make_bucket(bucket_name) -@transaction.atomic -@timeout(150) def read_uploaded_vins_file(): vins_file = ( UploadedVinsFile.objects.filter(processed=False) @@ -29,16 +26,16 @@ def read_uploaded_vins_file(): if vins_file is not None: file_response = get_minio_object(vins_file.filename) if file_response is not None: - parse_and_save(vins_file, file_response) - try: - file_response.close() - file_response.release_conn() - except Exception: - pass + with transaction.atomic(): + parse_and_save(vins_file, file_response) + 
file_response.close() + file_response.release_conn() -@timeout(90) def batch_decode_vins(service_name, batch_size=50): + vins_file_in_progress = UploadedVinsFile.objects.filter(processed=False).exists() + if vins_file_in_progress: + return max_decode_attempts = settings.MAX_DECODE_ATTEMPTS service = get_service(service_name) if service: @@ -74,19 +71,22 @@ def batch_decode_vins(service_name, batch_size=50): decoder = service.BATCH_DECODER.value decoded_data = decoder(uploaded_vin_records) + with transaction.atomic(): + save_decoded_data( + uploaded_vin_records, + vins_to_insert, + vins_to_decoded_record_ids_map, + service_name, + decoded_data, + ) - save_decoded_data( - uploaded_vin_records, - vins_to_insert, - vins_to_decoded_record_ids_map, - service_name, - decoded_data, - ) def remove_cleaned_datasets(): try: client = get_minio_client() - objects = client.list_objects(settings.MINIO_BUCKET_NAME, prefix="cleaned_datasets/") + objects = client.list_objects( + settings.MINIO_BUCKET_NAME, prefix="cleaned_datasets/" + ) for obj in objects: client.remove_object(settings.MINIO_BUCKET_NAME, obj.object_name) except Exception: diff --git a/docker-compose.yml b/docker-compose.yml index 117c935d..ad16f2d6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -7,9 +7,11 @@ services: - ./postgres:/docker-entrypoint-initdb.d environment: - POSTGRES_DB=postgres - - POSTGRES_MULTIPLE_DATABASES=superset + - POSTGRES_MULTIPLE_DATABASES=metabase - POSTGRES_USER=postgres - POSTGRES_PASSWORD=postgres + ports: + - 5432:5432 healthcheck: test: ["CMD-SHELL", "pg_isready -U postgres"] interval: 5s @@ -49,7 +51,6 @@ services: env_file: - keycloak.env - minio.env - - decoder.env environment: - ALLOWED_HOSTS=localhost - CORS_ORIGIN_ALLOW_ALL=False diff --git a/frontend/Dockerfile b/frontend/Dockerfile index 707160f3..f511363d 100644 --- a/frontend/Dockerfile +++ b/frontend/Dockerfile @@ -7,4 +7,6 @@ RUN npm install --legacy-peer-deps COPY . . +RUN mkdir -p /web/node_modules/.cache && chmod -R 777 /web/node_modules/.cache + EXPOSE 3000 diff --git a/frontend/package.json b/frontend/package.json index 73dfd4cd..1fd457b9 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,6 +1,6 @@ { "name": "frontend", - "version": "0.4.0", + "version": "0.5.0", "private": true, "dependencies": { "@emotion/react": "^11.6.0", diff --git a/frontend/src/uploads/components/UploadPage.js b/frontend/src/uploads/components/UploadPage.js index d200b607..b84c9c3d 100644 --- a/frontend/src/uploads/components/UploadPage.js +++ b/frontend/src/uploads/components/UploadPage.js @@ -70,7 +70,7 @@ const UploadPage = (props) => { > {selectionList} - {datasetSelected && ( + {datasetSelected && datasetSelected !== 'ICBC Vins' && (