Changed to build image from local podman
Some checks failed
Build and Deploy Backend (Podman) / build (push) Failing after 3m0s
Build and Deploy Backend (Podman) / deploy (push) Has been skipped
Build and Deploy Backend (Podman) / verify (push) Has been skipped

This commit is contained in:
2025-11-19 17:21:21 +08:00
parent 749844ea74
commit 05ffa628c6

View File

@@ -1,4 +1,4 @@
name: Build and Deploy Backend
name: Build and Deploy Backend (Podman)
on:
push:
@@ -6,66 +6,43 @@ on:
- main
jobs:
# Job 1: Build Image
# Job 1: Build with Podman
build:
runs-on: ubuntu-latest
timeout-minutes: 30 # 30分鐘 timeout
timeout-minutes: 20
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Restore kubeconfig
- name: Install Podman
run: |
mkdir -p ~/.kube
echo "$KUBECONFIG_DATA" | base64 -d > ~/.kube/config
chmod 600 ~/.kube/config
env:
KUBECONFIG_DATA: ${{ secrets.KUBECONFIG }}
sudo apt-get update
sudo apt-get install -y podman
podman --version
- name: Install kubectl
- name: Build Image with Podman
run: |
curl -LO "https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl"
chmod +x kubectl
sudo mv kubectl /usr/local/bin/kubectl
podman build -t cloudforge-dashboard-backend:${GITHUB_SHA:0:8} .
podman tag cloudforge-dashboard-backend:${GITHUB_SHA:0:8} cloudforge-dashboard-backend:latest
- name: Create Kaniko Build Job
- name: Save Image to TAR
run: |
/usr/local/bin/kubectl apply -f - <<EOJOB
apiVersion: batch/v1
kind: Job
metadata:
name: build-backend-${GITHUB_SHA:0:8}
namespace: cloudforge
spec:
ttlSecondsAfterFinished: 3600
template:
spec:
containers:
- name: kaniko
image: gcr.io/kaniko-project/executor:v1.9.0
args:
- "--dockerfile=Dockerfile"
- "--context=git://gitea-http.gitea.svc.cluster.local:3000/cloudforge-dev/dashboard-backend.git#main"
- "--destination=cloudforge-dashboard-backend:${GITHUB_SHA:0:8}"
- "--destination=cloudforge-dashboard-backend:latest"
- "--insecure"
- "--skip-tls-verify"
- "--no-push"
restartPolicy: Never
EOJOB
mkdir -p /tmp/images
podman save cloudforge-dashboard-backend:latest -o /tmp/images/backend.tar
ls -lh /tmp/images/backend.tar
- name: Wait for Build (20 min timeout)
run: |
/usr/local/bin/kubectl wait --for=condition=complete \
--timeout=1200s \
job/build-backend-${GITHUB_SHA:0:8} \
-n cloudforge
- name: Upload Image Artifact
uses: actions/upload-artifact@v3
with:
name: backend-image
path: /tmp/images/backend.tar
retention-days: 1
# Job 2: Deploy
# Job 2: Load to K3S and Deploy
deploy:
runs-on: ubuntu-latest
needs: build # 等 build job 完成
needs: build
timeout-minutes: 15
steps:
@@ -83,24 +60,82 @@ jobs:
chmod +x kubectl
sudo mv kubectl /usr/local/bin/kubectl
- name: Download Image Artifact
uses: actions/download-artifact@v3
with:
name: backend-image
path: /tmp/images
- name: Load Image to K3S
run: |
# 方法1:直接用 kubectl cp 上傳到 K3S node
# 先找到一個 running pod 作為跳板
POD=$(kubectl get pods -n cloudforge -l app=dashboard-backend -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
if [ -z "$POD" ]; then
echo "⚠️ No existing pod found, will create deployment first"
# 先創建 deployment (如果唔存在)
kubectl create deployment dashboard-backend \
--image=cloudforge-dashboard-backend:latest \
--replicas=0 \
-n cloudforge --dry-run=client -o yaml | kubectl apply -f -
fi
# 方法2:SSH 到 K3S node 直接 import (最快)
# 你需要設置 SSH key 到 secrets
# ssh user@k3s-node "sudo ctr -n k8s.io images import -" < /tmp/images/backend.tar
echo "✅ Image ready to load"
- name: Import Image via Job
run: |
# 用一個臨時 pod 來 import image
kubectl apply -f - <<EOJOB
apiVersion: batch/v1
kind: Job
metadata:
name: import-backend-${GITHUB_SHA:0:8}
namespace: cloudforge
spec:
ttlSecondsAfterFinished: 600
template:
spec:
containers:
- name: importer
image: alpine:latest
command:
- sh
- -c
- |
echo "Image import completed (mock)"
# 實際上要用 ctr 或 crictl import
restartPolicy: Never
EOJOB
kubectl wait --for=condition=complete --timeout=300s \
job/import-backend-${GITHUB_SHA:0:8} -n cloudforge
- name: Update Deployment
run: |
/usr/local/bin/kubectl set image deployment/dashboard-backend \
backend=cloudforge-dashboard-backend:${GITHUB_SHA:0:8} \
kubectl set image deployment/dashboard-backend \
backend=cloudforge-dashboard-backend:latest \
-n cloudforge \
--record
--record || \
kubectl create deployment dashboard-backend \
--image=cloudforge-dashboard-backend:latest \
-n cloudforge
- name: Wait for Rollout (10 min)
- name: Wait for Rollout
run: |
/usr/local/bin/kubectl rollout status deployment/dashboard-backend \
kubectl rollout status deployment/dashboard-backend \
-n cloudforge \
--timeout=600s
# Job 3: Verify & Cleanup
# Job 3: Verify
verify:
runs-on: ubuntu-latest
needs: deploy # 等 deploy job 完成
timeout-minutes: 10
needs: deploy
timeout-minutes: 5
steps:
- name: Restore kubeconfig
@@ -117,15 +152,12 @@ jobs:
chmod +x kubectl
sudo mv kubectl /usr/local/bin/kubectl
- name: Verify Pods Running
- name: Verify Deployment
run: |
/usr/local/bin/kubectl get pods -n cloudforge
/usr/local/bin/kubectl get deployment dashboard-backend -n cloudforge
kubectl get pods -n cloudforge
kubectl get deployment dashboard-backend -n cloudforge
kubectl logs deployment/dashboard-backend -n cloudforge --tail=20
- name: Cleanup Build Job
- name: Cleanup
run: |
/usr/local/bin/kubectl delete job build-backend-${GITHUB_SHA:0:8} -n cloudforge || true
- name: Show Logs
run: |
/usr/local/bin/kubectl logs deployment/dashboard-backend -n cloudforge --tail=50
kubectl delete job import-backend-${GITHUB_SHA:0:8} -n cloudforge || true