dashboard-backend/.gitea/workflows/build-deploy.yml
CloudForge Dev bbddd9a7ed
Changed to build image from local podman
2025-12-05 17:01:24 +08:00

name: Build and Deploy Backend (Podman)

on:
  push:
    branches:
      - main

jobs:
  # Job 1: Build with Podman
  build:
    runs-on: ubuntu-latest
    timeout-minutes: 20
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Install Podman
        run: |
          sudo apt-get update
          sudo apt-get install -y podman
          podman --version

      - name: Build Image with Podman
        run: |
          podman build -t cloudforge-dashboard-backend:${GITHUB_SHA:0:8} .
          podman tag cloudforge-dashboard-backend:${GITHUB_SHA:0:8} cloudforge-dashboard-backend:latest

      - name: Save Image to TAR
        run: |
          mkdir -p /tmp/images
          podman save cloudforge-dashboard-backend:latest -o /tmp/images/backend.tar
          ls -lh /tmp/images/backend.tar

      - name: Upload Image Artifact
        uses: actions/upload-artifact@v3
        with:
          name: backend-image
          path: /tmp/images/backend.tar
          retention-days: 1
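
      # Design note: build and deploy run on separate runners, so the image
      # travels between jobs as a tarball artifact. An alternative (not used
      # here) would be pushing to a registry the cluster can pull from, e.g.
      # with a hypothetical registry host:
      #   podman push cloudforge-dashboard-backend:latest registry.example.internal/cloudforge-dashboard-backend:latest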

  # Job 2: Load to K3S and Deploy
  deploy:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 15
    steps:
      - name: Restore kubeconfig
        run: |
          mkdir -p ~/.kube
          echo "$KUBECONFIG_DATA" | base64 -d > ~/.kube/config
          chmod 600 ~/.kube/config
        env:
          KUBECONFIG_DATA: ${{ secrets.KUBECONFIG }}
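
      # The KUBECONFIG secret is expected to hold a base64-encoded kubeconfig,
      # e.g. created with "base64 -w0 ~/.kube/config" and stored as a
      # repository secret.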

      - name: Install kubectl
        run: |
          curl -LO "https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl"
          chmod +x kubectl
          sudo mv kubectl /usr/local/bin/kubectl

      - name: Download Image Artifact
        uses: actions/download-artifact@v3
        with:
          name: backend-image
          path: /tmp/images

      - name: Load Image to K3S
        run: |
          # Method 1: upload the image to the K3S node directly with kubectl cp.
          # First, find a running pod to use as a jump host.
          POD=$(kubectl get pods -n cloudforge -l app=dashboard-backend -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
          if [ -z "$POD" ]; then
            echo "⚠️ No existing pod found, will create deployment first"
            # Create the deployment first (if it does not exist yet).
            kubectl create deployment dashboard-backend \
              --image=cloudforge-dashboard-backend:latest \
              --replicas=0 \
              -n cloudforge --dry-run=client -o yaml | kubectl apply -f -
          fi

          # Method 2: SSH to the K3S node and import directly (fastest).
          # Requires an SSH key set up in the repository secrets:
          # ssh user@k3s-node "sudo ctr -n k8s.io images import -" < /tmp/images/backend.tar
          echo "✅ Image ready to load"

      - name: Import Image via Job
        run: |
          # Use a short-lived Job to import the image.
          kubectl apply -f - <<EOJOB
          apiVersion: batch/v1
          kind: Job
          metadata:
            name: import-backend-${GITHUB_SHA:0:8}
            namespace: cloudforge
          spec:
            ttlSecondsAfterFinished: 600
            template:
              spec:
                containers:
                - name: importer
                  image: alpine:latest
                  command:
                  - sh
                  - -c
                  - |
                    echo "Image import completed (mock)"
                    # A real import would use ctr or crictl.
                restartPolicy: Never
          EOJOB
          kubectl wait --for=condition=complete --timeout=300s \
            job/import-backend-${GITHUB_SHA:0:8} -n cloudforge
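
      # Note: the Job above is a mock; its alpine container can reach neither
      # the saved tarball nor the node's container runtime. A real in-cluster
      # import would need the tar present on the node and a privileged pod
      # that mounts the K3S containerd socket
      # (/run/k3s/containerd/containerd.sock) to run
      # "ctr -n k8s.io images import" against it, or simply the SSH approach
      # sketched above.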

      - name: Update Deployment
        run: |
          kubectl set image deployment/dashboard-backend \
            backend=cloudforge-dashboard-backend:latest \
            -n cloudforge \
            --record || \
          kubectl create deployment dashboard-backend \
            --image=cloudforge-dashboard-backend:latest \
            -n cloudforge
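
      # Note: cloudforge-dashboard-backend:latest only exists in the node's
      # local image store, so the container must not try to pull it from a
      # remote registry. One way to enforce that (a sketch; container name
      # "backend" as used above):
      #   kubectl patch deployment dashboard-backend -n cloudforge --type=json \
      #     -p='[{"op":"add","path":"/spec/template/spec/containers/0/imagePullPolicy","value":"Never"}]'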

      - name: Wait for Rollout
        run: |
          kubectl rollout status deployment/dashboard-backend \
            -n cloudforge \
            --timeout=600s

  # Job 3: Verify
  verify:
    runs-on: ubuntu-latest
    needs: deploy
    timeout-minutes: 5
    steps:
      - name: Restore kubeconfig
        run: |
          mkdir -p ~/.kube
          echo "$KUBECONFIG_DATA" | base64 -d > ~/.kube/config
          chmod 600 ~/.kube/config
        env:
          KUBECONFIG_DATA: ${{ secrets.KUBECONFIG }}

      - name: Install kubectl
        run: |
          curl -LO "https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl"
          chmod +x kubectl
          sudo mv kubectl /usr/local/bin/kubectl

      - name: Verify Deployment
        run: |
          kubectl get pods -n cloudforge
          kubectl get deployment dashboard-backend -n cloudforge
          kubectl logs deployment/dashboard-backend -n cloudforge --tail=20

      - name: Cleanup
        run: |
          kubectl delete job import-backend-${GITHUB_SHA:0:8} -n cloudforge || true