name: Build and Deploy to PROD (Lightsail Blue/Green via SSH + Docker)

on:
  push:
    branches: [ "main" ]

env:
  # --- Application / container naming ---
  PROJECT_NAME: "devdevdev"
  IMAGE_NAME: "devdevdev/app"              # locally built image (tagged latest + SHA)
  CONTAINER_BASE: "devdevdev-main-server"  # base name for the app containers
  BLUE_SUFFIX: "-blue"
  GREEN_SUFFIX: "-green"

  # --- Port layout ---
  BLUE_PORT: "18080"    # host port (blue)
  GREEN_PORT: "18081"   # host port (green)
  APP_PORT: "8080"      # container-internal port (Spring Boot)

  # --- Health check ---
  HEALTHCHECK_PATH: "/actuator/health"  # change to "/" if Actuator is not enabled
  HEALTHCHECK_TIMEOUT: "3"              # curl timeout per attempt (seconds)
  HEALTHCHECK_RETRY: "20"               # retry count (5 s * 20 = up to 100 s)

  # --- SSH / Lightsail ---
  SSH_USER: "ec2-user"                             # SSH login user
  LIGHTSAIL_HOST: "${{ secrets.LIGHTSAIL_HOST }}"  # public IP or domain
jobs:
  build:
    name: Build and Deploy (Blue/Green)
    runs-on: ubuntu-latest

    steps:
      # v4 runs on a maintained Node runtime (v3 is pinned to deprecated Node 16).
      - uses: actions/checkout@v4

      # ====== Toolchain + secret-backed config files ======
      - name: Set up JDK 21
        uses: actions/setup-java@v4
        with:
          java-version: "21"
          distribution: corretto

      # Secrets are written through quoted heredocs so the shell never expands
      # characters inside the secret ($, backticks, quotes). `echo "$SECRET"`
      # would corrupt — or even execute — such content.
      - name: make application-prod.yml
        run: |
          cd ./src/main/resources
          cat >> ./application-prod.yml <<'EOF'
          ${{ secrets.application_prod }}
          EOF
          cat >> ./application-jwt-prod.yml <<'EOF'
          ${{ secrets.application_jwt_prod }}
          EOF
          cat >> ./application-oauth2-prod.yml <<'EOF'
          ${{ secrets.application_oauth2_prod }}
          EOF
          cat >> ./application-storage-s3-prod.yml <<'EOF'
          ${{ secrets.application_storage_s3_prod }}
          EOF
          cat >> ./application-open-ai.yml <<'EOF'
          ${{ secrets.application_open_ai }}
          EOF
          cat >> ./application-opensearch-prod.yml <<'EOF'
          ${{ secrets.application_opensearch_prod }}
          EOF

      - name: make application-test.yml
        run: |
          cd ./src/test/resources
          cat >> ./application-storage-s3.yml <<'EOF'
          ${{ secrets.application_storage_s3 }}
          EOF
          cat >> ./application-open-ai.yml <<'EOF'
          ${{ secrets.application_open_ai }}
          EOF
          cat >> ./application-opensearch-test.yml <<'EOF'
          ${{ secrets.application_opensearch_test }}
          EOF

      # ====== Gradle build (the JAR must exist before `docker build` COPYs it) ======
      - name: Grant execute permission for gradlew
        run: chmod +x ./gradlew

      - name: Build with Gradle (bootJar)
        run: ./gradlew bootJar -x test -x asciidoctor

      # ====== Docker build ======
      - name: Use Dockerfile-prod if present
        run: |
          if [ -f Dockerfile-prod ]; then
            # cp -f overwrites in place; no separate rm needed
            cp -f Dockerfile-prod Dockerfile
          fi

      - name: Build Docker image
        run: |
          docker build \
            -t "${IMAGE_NAME}:${GITHUB_SHA}" \
            -t "${IMAGE_NAME}:latest" \
            .

      - name: Save image as archive
        run: |
          mkdir -p out
          # Archive both tags (SHA + latest) so the host can load either one.
          docker save "${IMAGE_NAME}:${GITHUB_SHA}" "${IMAGE_NAME}:latest" | gzip > out/image.tar.gz
          echo "ARCHIVE=out/image.tar.gz" >> "$GITHUB_ENV"

      # ====== SSH setup ======
      - name: Prepare SSH key
        run: |
          cat > key.pem <<'EOF'
          ${{ secrets.LIGHTSAIL_SSH_KEY }}
          EOF
          chmod 600 key.pem
          mkdir -p ~/.ssh
          # Pin the host key up front so scp/ssh can run with strict checking.
          ssh-keyscan -H "${LIGHTSAIL_HOST}" >> ~/.ssh/known_hosts

      # ====== Ship the image archive ======
      - name: Upload image archive
        run: |
          scp -i key.pem -o StrictHostKeyChecking=yes "$ARCHIVE" \
            "${SSH_USER}@${LIGHTSAIL_HOST}:/home/${SSH_USER}/image.tar.gz"
|
| 104 | + - name: Upload blue/green deploy script |
| 105 | + run: | |
| 106 | + cat > deploy_blue_green.sh <<'EOS' |
| 107 | + #!/usr/bin/env bash |
| 108 | + set -euo pipefail |
| 109 | +
|
| 110 | + sudo systemctl enable --now docker >/dev/null 2>&1 || true |
| 111 | +
|
| 112 | + IMAGE_NAME=${IMAGE_NAME:-devdevdev/app} |
| 113 | + CONTAINER_BASE=${CONTAINER_BASE:-devdevdev-main-server} |
| 114 | + BLUE_SUFFIX=${BLUE_SUFFIX:--blue} |
| 115 | + GREEN_SUFFIX=${GREEN_SUFFIX:--green} |
| 116 | + BLUE_PORT=${BLUE_PORT:-18080} |
| 117 | + GREEN_PORT=${GREEN_PORT:-18081} |
| 118 | + APP_PORT=${APP_PORT:-8080} |
| 119 | + HEALTHCHECK_PATH=${HEALTHCHECK_PATH:-/} # actuator 없으면 / |
| 120 | + HEALTHCHECK_TIMEOUT=${HEALTHCHECK_TIMEOUT:-3} |
| 121 | + HEALTHCHECK_RETRY=${HEALTHCHECK_RETRY:-20} |
| 122 | +
|
| 123 | + UPSTREAM_FILE="/etc/nginx/conf.d/backend-upstream.upstream" |
| 124 | + BLUE_NAME="${CONTAINER_BASE}${BLUE_SUFFIX}" |
| 125 | + GREEN_NAME="${CONTAINER_BASE}${GREEN_SUFFIX}" |
| 126 | +
|
| 127 | + # 아카이브 경로는 HOME 기준으로 |
| 128 | + ARCHIVE_FILE="$HOME/image.tar.gz" |
| 129 | + |
| 130 | + echo "[1/9] Load image: ${ARCHIVE_FILE}" |
| 131 | + ls -lh "${ARCHIVE_FILE}" || { echo "[!] archive missing"; exit 1; } |
| 132 | + gzip -t "${ARCHIVE_FILE}" |
| 133 | + gunzip -c "${ARCHIVE_FILE}" | sudo docker load |
| 134 | + |
| 135 | + # 보강: :latest 태그가 없으면 가장 최근 태그를 latest로 재태깅 |
| 136 | + if ! sudo docker image inspect "${IMAGE_NAME}:latest" >/dev/null 2>&1; then |
| 137 | + echo "[info] ${IMAGE_NAME}:latest not found. Retagging…" |
| 138 | + # 해당 리포의 임의의 태그 하나를 찾아 latest로 붙임 |
| 139 | + NEW_TAG=$(sudo docker images --format '{{.Repository}}:{{.Tag}}' \ |
| 140 | + | awk -v repo="${IMAGE_NAME}" -F: '$1==repo && $2!="latest"{print $2; exit}') |
| 141 | + if [ -n "${NEW_TAG:-}" ]; then |
| 142 | + sudo docker tag "${IMAGE_NAME}:${NEW_TAG}" "${IMAGE_NAME}:latest" |
| 143 | + else |
| 144 | + echo "[!] no tag to retag as latest"; exit 1 |
| 145 | + fi |
| 146 | + fi |
| 147 | + |
| 148 | + ACTIVE_PORT="" |
| 149 | + if [ -f "${UPSTREAM_FILE}" ]; then |
| 150 | + ACTIVE_PORT=$(grep -oE '127\.0\.0\.1:([0-9]+)' "${UPSTREAM_FILE}" | awk -F: '{print $2}' || true) |
| 151 | + fi |
| 152 | + if [ -z "${ACTIVE_PORT}" ]; then |
| 153 | + echo "server 127.0.0.1:${BLUE_PORT};" | sudo tee "${UPSTREAM_FILE}" >/dev/null |
| 154 | + ACTIVE_PORT="${BLUE_PORT}" |
| 155 | + fi |
| 156 | + echo "[2/9] Current active port: ${ACTIVE_PORT}" |
| 157 | +
|
| 158 | + if [ "${ACTIVE_PORT}" = "${BLUE_PORT}" ]; then |
| 159 | + TARGET_NAME="${GREEN_NAME}"; TARGET_PORT="${GREEN_PORT}" |
| 160 | + OLD_NAME="${BLUE_NAME}"; OLD_PORT="${BLUE_PORT}" |
| 161 | + else |
| 162 | + TARGET_NAME="${BLUE_NAME}"; TARGET_PORT="${BLUE_PORT}" |
| 163 | + OLD_NAME="${GREEN_NAME}"; OLD_PORT="${GREEN_PORT}" |
| 164 | + fi |
| 165 | + echo "[3/9] Target container: ${TARGET_NAME} on ${TARGET_PORT}" |
| 166 | +
|
| 167 | + if sudo docker ps -a --format '{{.Names}}' | grep -qw "${TARGET_NAME}"; then |
| 168 | + sudo docker stop "${TARGET_NAME}" || true |
| 169 | + sudo docker rm "${TARGET_NAME}" || true |
| 170 | + fi |
| 171 | +
|
| 172 | + echo "[4/9] Run new container" |
| 173 | + sudo docker run -d \ |
| 174 | + --name "${TARGET_NAME}" \ |
| 175 | + --restart=always \ |
| 176 | + -p 127.0.0.1:${TARGET_PORT}:${APP_PORT} \ |
| 177 | + -e SPRING_PROFILES_ACTIVE=prod \ |
| 178 | + ${IMAGE_NAME}:latest |
| 179 | +
|
| 180 | + echo "[5/9] Health check http://127.0.0.1:${TARGET_PORT}${HEALTHCHECK_PATH}" |
| 181 | + code=$(curl -sS -o /dev/null -w "%{http_code}" \ |
| 182 | + --max-time ${HEALTHCHECK_TIMEOUT} --noproxy '*' \ |
| 183 | + "http://127.0.0.1:${TARGET_PORT}${HEALTHCHECK_PATH}" || echo "000") |
| 184 | + ok=0 |
| 185 | + for i in $(seq 1 ${HEALTHCHECK_RETRY}); do |
| 186 | + if curl -fsS --max-time ${HEALTHCHECK_TIMEOUT} "http://127.0.0.1:${TARGET_PORT}${HEALTHCHECK_PATH}" >/dev/null 2>&1; then |
| 187 | + ok=1; break |
| 188 | + fi |
| 189 | + echo " retry $i/${HEALTHCHECK_RETRY}..." |
| 190 | + sleep 5 |
| 191 | + done |
| 192 | + if [ "$ok" -ne 1 ]; then |
| 193 | + echo "[!] Health check failed. Rollback." |
| 194 | + sudo docker logs --tail 200 "${TARGET_NAME}" || true |
| 195 | + sudo docker stop "${TARGET_NAME}" || true |
| 196 | + sudo docker rm "${TARGET_NAME}" || true |
| 197 | + exit 1 |
| 198 | + fi |
| 199 | +
|
| 200 | + echo "[6/9] Switch upstream to ${TARGET_PORT}" |
| 201 | + echo "server 127.0.0.1:${TARGET_PORT};" | sudo tee "${UPSTREAM_FILE}" >/dev/null |
| 202 | + sudo nginx -t |
| 203 | + sudo systemctl reload nginx |
| 204 | +
|
| 205 | + echo "[7/9] Stop old container: ${OLD_NAME} (if any)" |
| 206 | + if sudo docker ps -a --format '{{.Names}}' | grep -qw "${OLD_NAME}"; then |
| 207 | + sudo docker stop "${OLD_NAME}" || true |
| 208 | + sudo docker rm "${OLD_NAME}" || true |
| 209 | + fi |
| 210 | +
|
| 211 | + echo "[8/9] Cleanup old archives (keep last 3)" |
| 212 | + cd "$HOME" && ls -t image*.tar.gz | tail -n +4 | xargs -r rm -f |
| 213 | +
|
| 214 | + echo "[9/9] Done." |
| 215 | + EOS |
| 216 | + chmod +x deploy_blue_green.sh |
| 217 | + scp -i key.pem -o StrictHostKeyChecking=yes deploy_blue_green.sh ${SSH_USER}@${LIGHTSAIL_HOST}:/home/${SSH_USER}/ |
| 218 | +
|
| 219 | + # ====== 원격 실행 ====== |
| 220 | + - name: Remote Blue/Green deploy |
| 221 | + run: | |
| 222 | + ssh -i key.pem -o StrictHostKeyChecking=yes ${SSH_USER}@${LIGHTSAIL_HOST} \ |
| 223 | + "env IMAGE_NAME='${IMAGE_NAME}' \ |
| 224 | + CONTAINER_BASE='${CONTAINER_BASE}' \ |
| 225 | + BLUE_SUFFIX='${BLUE_SUFFIX}' \ |
| 226 | + GREEN_SUFFIX='${GREEN_SUFFIX}' \ |
| 227 | + BLUE_PORT='${BLUE_PORT}' \ |
| 228 | + GREEN_PORT='${GREEN_PORT}' \ |
| 229 | + APP_PORT='${APP_PORT}' \ |
| 230 | + HEALTHCHECK_PATH='${HEALTHCHECK_PATH}' \ |
| 231 | + HEALTHCHECK_TIMEOUT='${HEALTHCHECK_TIMEOUT}' \ |
| 232 | + HEALTHCHECK_RETRY='${HEALTHCHECK_RETRY}' \ |
| 233 | + bash /home/${SSH_USER}/deploy_blue_green.sh" |
| 234 | +
|
| 235 | + # ====== Slack 알림 ====== |
| 236 | + - name: action-slack |
| 237 | + uses: 8398a7/action-slack@v3 |
| 238 | + with: |
| 239 | + status: ${{ job.status }} |
| 240 | + author_name: "[PROD] 배포 결과를 알려드려요" |
| 241 | + fields: repo,message,commit,author,eventName,ref,took |
| 242 | + env: |
| 243 | + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} |
| 244 | + if: always() |
0 commit comments