diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 49b0b9e..56a0ac3 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -18,6 +18,7 @@ env: REGISTRY: ghcr.io IMAGE_NAME: devkor-github/ontime-back IMAGE_TAG: ${{ github.sha }} + PUBLIC_BACKEND_HOST: ontime-back.kro.kr jobs: build-and-push: @@ -66,6 +67,16 @@ jobs: target: "/home/ubuntu/OnTime-back" strip_components: 1 + - name: Upload Caddyfile to EC2 + uses: appleboy/scp-action@v0.1.7 + with: + host: ${{ secrets.EC2_HOST }} + username: ${{ secrets.EC2_USER }} + key: ${{ secrets.EC2_SSH_KEY }} + source: "ontime-back/Caddyfile" + target: "/home/ubuntu/OnTime-back" + strip_components: 1 + - name: Pull image and restart container uses: appleboy/ssh-action@v1.0.3 with: @@ -77,6 +88,7 @@ jobs: DEPLOY_DIR="/home/ubuntu/OnTime-back" CONTAINER_NAME="ontime-container" + PUBLIC_BACKEND_HOST="${{ env.PUBLIC_BACKEND_HOST }}" mkdir -p "$DEPLOY_DIR" cd "$DEPLOY_DIR" @@ -86,7 +98,7 @@ jobs: IMAGE_TAG=${{ env.IMAGE_TAG }} BACKEND_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} BACKEND_CONTAINER_NAME=ontime-container - BACKEND_HTTP_PORT=${{ secrets.BACKEND_HTTP_PORT || '8080' }} + BACKEND_HTTP_PORT=${{ secrets.BACKEND_HTTP_PORT || '127.0.0.1:8080' }} BACKEND_MEMORY_LIMIT=${{ secrets.BACKEND_MEMORY_LIMIT || '768m' }} BACKEND_CPU_LIMIT=${{ secrets.BACKEND_CPU_LIMIT || '1.0' }} SERVER_PORT=8080 @@ -149,9 +161,28 @@ jobs: grep -E "^$1=" .env | tail -n 1 | cut -d= -f2- } + install_caddy() { + if command -v caddy >/dev/null 2>&1; then + return + fi + + echo "Installing Caddy from the official package repository..." 
+ sudo apt-get update + sudo apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl gnupg + curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' \ + | sudo gpg --dearmor --yes -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg + curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' \ + | sudo tee /etc/apt/sources.list.d/caddy-stable.list >/dev/null + sudo chmod o+r /usr/share/keyrings/caddy-stable-archive-keyring.gpg + sudo chmod o+r /etc/apt/sources.list.d/caddy-stable.list + sudo apt-get update + sudo apt-get install -y caddy + } + DB_URL="$(get_env_value SPRING_DATASOURCE_URL)" DB_USERNAME="$(get_env_value SPRING_DATASOURCE_USERNAME)" DB_PASSWORD="$(get_env_value SPRING_DATASOURCE_PASSWORD)" + BACKEND_HTTP_BIND="$(get_env_value BACKEND_HTTP_PORT)" DDL_AUTO="$(get_env_value SPRING_JPA_HIBERNATE_DDL_AUTO)" FLYWAY_BASELINE="$(get_env_value SPRING_FLYWAY_BASELINE_ON_MIGRATE)" NORMALIZED_DB_URL="$(printf '%s' "$DB_URL" | tr '[:upper:]' '[:lower:]')" @@ -171,6 +202,7 @@ jobs: [ -n "$DB_HOST" ] || fail_deploy "SPRING_DATASOURCE_URL must include an RDS host." [ "$DB_NAME" = "ontime_prod" ] || fail_deploy "SPRING_DATASOURCE_URL must use the ontime_prod database." [ "$NORMALIZED_DB_USERNAME" != "root" ] || fail_deploy "SPRING_DATASOURCE_USERNAME must not be root." + [ "$BACKEND_HTTP_BIND" = "127.0.0.1:8080" ] || fail_deploy "BACKEND_HTTP_PORT must be 127.0.0.1:8080 so public traffic goes through Caddy HTTPS." [ "$DDL_AUTO" = "validate" ] || fail_deploy "SPRING_JPA_HIBERNATE_DDL_AUTO must be validate." [ "$FLYWAY_BASELINE" = "false" ] || fail_deploy "SPRING_FLYWAY_BASELINE_ON_MIGRATE must be false." @@ -217,15 +249,53 @@ jobs: sudo docker-compose up -d --remove-orphans fi + HEALTHY=false for attempt in $(seq 1 30); do STATUS="$(sudo docker inspect -f '{{.State.Health.Status}}' "$CONTAINER_NAME" 2>/dev/null || true)" if [ "$STATUS" = "healthy" ]; then echo "Container is healthy." 
- exit 0 + HEALTHY=true + break fi echo "Waiting for healthy container status; current status: ${STATUS:-unknown}" sleep 5 done - sudo docker logs --tail=200 "$CONTAINER_NAME" || true - exit 1 + if [ "$HEALTHY" != "true" ]; then + sudo docker logs --tail=200 "$CONTAINER_NAME" || true + exit 1 + fi + + echo "Configuring Caddy reverse proxy for https://$PUBLIC_BACKEND_HOST..." + [ -f "$DEPLOY_DIR/Caddyfile" ] || fail_deploy "Caddyfile was not uploaded to $DEPLOY_DIR." + + if ! getent hosts "$PUBLIC_BACKEND_HOST" >/dev/null 2>&1; then + fail_deploy "DNS for $PUBLIC_BACKEND_HOST must resolve before Caddy can issue a TLS certificate." + fi + + install_caddy + sudo install -o root -g root -m 0644 "$DEPLOY_DIR/Caddyfile" /etc/caddy/Caddyfile + sudo caddy validate --config /etc/caddy/Caddyfile || fail_deploy "Caddyfile validation failed; not reloading Caddy." + sudo systemctl enable caddy + sudo systemctl reload caddy || sudo systemctl restart caddy + sudo systemctl is-active --quiet caddy || fail_deploy "Caddy service is not active after reload/restart." + + echo "Verifying HTTPS through local Caddy..." + HTTPS_HEALTHY=false + for attempt in $(seq 1 6); do + if curl -fsS \ + --resolve "$PUBLIC_BACKEND_HOST:443:127.0.0.1" \ + "https://$PUBLIC_BACKEND_HOST/actuator/health/readiness"; then + HTTPS_HEALTHY=true + break + fi + echo "Waiting for HTTPS readiness through Caddy..." + sleep 10 + done + + if [ "$HTTPS_HEALTHY" != "true" ]; then + sudo journalctl -u caddy --no-pager -n 120 || true + exit 1 + fi + + echo "HTTPS is healthy at https://$PUBLIC_BACKEND_HOST."
diff --git a/docs/deployment.md b/docs/deployment.md index a34dcf2..51311fd 100644 --- a/docs/deployment.md +++ b/docs/deployment.md @@ -14,7 +14,7 @@ Deployment access: Runtime image and port: -- `BACKEND_HTTP_PORT` (optional, defaults to `8080`) +- `BACKEND_HTTP_PORT` (optional, defaults to `127.0.0.1:8080`; if set, it must be exactly `127.0.0.1:8080`) - `BACKEND_MEMORY_LIMIT` (optional, defaults to `768m`; use `640m` if the EC2 instance is memory constrained) - `BACKEND_CPU_LIMIT` (optional, defaults to `1.0`) @@ -93,11 +93,22 @@ The workflow: 2. Pushes two GHCR tags: - `ghcr.io/devkor-github/ontime-back:` - `ghcr.io/devkor-github/ontime-back:deploy-latest` -3. Uploads `docker-compose.yml` to `/home/ubuntu/OnTime-back`. +3. Uploads `docker-compose.yml` and `Caddyfile` to `/home/ubuntu/OnTime-back`. 4. Writes `/home/ubuntu/OnTime-back/.env` from GitHub secrets. 5. Verifies EC2 can reach private RDS on `3306`. 6. Runs `docker compose pull && docker compose up -d --remove-orphans`. 7. Waits until the `ontime-container` Docker health status is `healthy`. +8. Installs Caddy if needed, configures `/etc/caddy/Caddyfile`, and verifies HTTPS for `ontime-back.kro.kr`. + +## HTTPS Prerequisites + +Before running the production deploy, configure AWS and DNS: + +- Point `ontime-back.kro.kr` to the EC2 public IPv4 address with an `A` record. Prefer an Elastic IP so the address is stable. +- Allow EC2 security group inbound TCP `80` and `443` from `0.0.0.0/0`. +- Restrict SSH `22` to trusted admin IPs. +- Remove or update any existing GitHub secret named `BACKEND_HTTP_PORT` unless it is exactly `127.0.0.1:8080`. +- Remove public inbound `8080` after HTTPS is verified. The deploy default binds the backend to `127.0.0.1:8080`, so Caddy can reach it locally without exposing it publicly. 
## Health Verification @@ -114,6 +125,7 @@ cd /home/ubuntu/OnTime-back sudo docker compose ps sudo docker inspect -f '{{.State.Health.Status}}' ontime-container curl -fsS http://localhost:8080/actuator/health/readiness +curl -fsS https://ontime-back.kro.kr/actuator/health/readiness nc -zv ontime-prod.cpoeguokwaq5.ap-northeast-2.rds.amazonaws.com 3306 ``` diff --git a/ontime-back/Caddyfile b/ontime-back/Caddyfile new file mode 100644 index 0000000..d50a28d --- /dev/null +++ b/ontime-back/Caddyfile @@ -0,0 +1,4 @@ +ontime-back.kro.kr { + encode gzip + reverse_proxy 127.0.0.1:8080 +} diff --git a/ontime-back/docker-compose.yml b/ontime-back/docker-compose.yml index f336b47..2f86e62 100644 --- a/ontime-back/docker-compose.yml +++ b/ontime-back/docker-compose.yml @@ -9,7 +9,7 @@ services: SERVER_PORT: "${SERVER_PORT:-8080}" JAVA_TOOL_OPTIONS: "${JAVA_TOOL_OPTIONS:--XX:InitialRAMPercentage=50.0 -XX:MaxRAMPercentage=75.0 -Djava.security.egd=file:/dev/./urandom}" ports: - - "${BACKEND_HTTP_PORT:-8080}:${SERVER_PORT:-8080}" + - "${BACKEND_HTTP_PORT:-127.0.0.1:8080}:${SERVER_PORT:-8080}" restart: unless-stopped stop_grace_period: 30s mem_limit: "${BACKEND_MEMORY_LIMIT:-768m}" diff --git a/ontime-back/docs/deployment/ec2.md b/ontime-back/docs/deployment/ec2.md index 1ae690e..62762e7 100644 --- a/ontime-back/docs/deployment/ec2.md +++ b/ontime-back/docs/deployment/ec2.md @@ -4,11 +4,13 @@ This service deploys to Amazon EC2 through `.github/workflows/deploy.yml`. ## How to Deploy -1. Make sure the EC2 instance has Docker installed and the security group allows inbound traffic for the service port, currently `8080`. -2. Add the required GitHub Actions secrets listed below. -3. Run the `Deploy` workflow manually from GitHub Actions, or push to the `main` branch. +1. Make sure the EC2 instance has Docker installed. +2. Point `ontime-back.kro.kr` to the EC2 public IPv4 address. +3. Allow inbound security group traffic on TCP `80` and `443`. +4. 
Add the required GitHub Actions secrets listed below. +5. Run the `Deploy` workflow manually from GitHub Actions, or push to the `main` branch. -The workflow builds a Docker image, pushes it to GHCR, uploads `docker-compose.yml` to `/home/ubuntu/OnTime-back`, writes a production `.env` from GitHub Secrets, verifies private RDS connectivity, and restarts Docker Compose on the EC2 instance. +The workflow builds a Docker image, pushes it to GHCR, uploads `docker-compose.yml` and `Caddyfile` to `/home/ubuntu/OnTime-back`, writes a production `.env` from GitHub Secrets, verifies private RDS connectivity, restarts Docker Compose, and configures Caddy HTTPS on the EC2 instance. ## Required EC2 Secrets @@ -37,7 +39,7 @@ The workflow builds a Docker image, pushes it to GHCR, uploads `docker-compose.y ## Optional Secrets -- `BACKEND_HTTP_PORT` defaults to `8080`. +- `BACKEND_HTTP_PORT` defaults to `127.0.0.1:8080`. If this secret exists, it must be exactly `127.0.0.1:8080`. - `BACKEND_MEMORY_LIMIT` defaults to `768m`; use `640m` if the EC2 instance is memory constrained. - `BACKEND_CPU_LIMIT` defaults to `1.0`. - `FEATURE_APPLE_LOGIN_ENABLED` defaults to `true`. @@ -48,8 +50,17 @@ The workflow builds a Docker image, pushes it to GHCR, uploads `docker-compose.y The deploy workflow writes these files under `/home/ubuntu/OnTime-back`: - `docker-compose.yml` +- `Caddyfile` - `.env` +Caddy is installed as a systemd service on the host and serves: + +```text +https://ontime-back.kro.kr +``` + +It proxies to the Spring Boot container through `http://127.0.0.1:8080`. After HTTPS is verified, remove public inbound `8080` from the EC2 security group. 
+ Production uses the private RDS instance: ```text diff --git a/ontime-back/src/main/java/devkor/ontime_back/global/oauth/apple/AppleLoginService.java b/ontime-back/src/main/java/devkor/ontime_back/global/oauth/apple/AppleLoginService.java index 41c4d59..340cb40 100644 --- a/ontime-back/src/main/java/devkor/ontime_back/global/oauth/apple/AppleLoginService.java +++ b/ontime-back/src/main/java/devkor/ontime_back/global/oauth/apple/AppleLoginService.java @@ -51,7 +51,7 @@ public class AppleLoginService { private static final String APPLE_KEYS_URL = "https://appleid.apple.com/auth/keys"; private static final String APPLE_TOKEN_URL = "https://appleid.apple.com/auth/token"; - private static final String REDIRECT_URI = "https://ontime.devkor.club/oauth2/apple/callback"; + private static final String REDIRECT_URI = "https://ontime-back.kro.kr/oauth2/apple/callback"; private String issuer = "https://appleid.apple.com"; @Value("${apple.client.id}") private String clientId;