Trigger build
Some checks failed
Build and Deploy Backend (Kaniko) / build-and-deploy (push) Has been cancelled

This commit is contained in:
2025-12-09 11:26:56 +08:00
parent 4e555680af
commit 5866e05ec0
2 changed files with 65 additions and 116 deletions

View File

@@ -1,4 +1,4 @@
name: Build and Deploy Backend (Podman)
name: Build and Deploy Backend (Kaniko)
on:
push:
@@ -6,46 +6,14 @@ on:
- main
jobs:
# Job 1: Build with Podman
build:
build-and-deploy:
runs-on: ubuntu-latest
timeout-minutes: 20
timeout-minutes: 30
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Podman
run: |
sudo apt-get update
sudo apt-get install -y podman
podman --version
- name: Build Image with Podman
run: |
podman build -t cloudforge-dashboard-backend:${GITHUB_SHA:0:8} .
podman tag cloudforge-dashboard-backend:${GITHUB_SHA:0:8} cloudforge-dashboard-backend:latest
- name: Save Image to TAR
run: |
mkdir -p /tmp/images
podman save cloudforge-dashboard-backend:latest -o /tmp/images/backend.tar
ls -lh /tmp/images/backend.tar
- name: Upload Image Artifact
uses: actions/upload-artifact@v3
with:
name: backend-image
path: /tmp/images/backend.tar
retention-days: 1
# Job 2: Load to K3S and Deploy
deploy:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 15
steps:
- name: Restore kubeconfig
run: |
mkdir -p ~/.kube
@@ -60,104 +28,52 @@ jobs:
chmod +x kubectl
sudo mv kubectl /usr/local/bin/kubectl
- name: Download Image Artifact
uses: actions/download-artifact@v3
with:
name: backend-image
path: /tmp/images
- name: Load Image to K3S
- name: Build with Kaniko
run: |
# 方法1:直接用 kubectl cp 上傳到 K3S node
# 先找到一個 running pod 作為跳板
POD=$(kubectl get pods -n cloudforge -l app=dashboard-backend -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
if [ -z "$POD" ]; then
echo "⚠️ No existing pod found, will create deployment first"
# 先創建 deployment (如果唔存在)
kubectl create deployment dashboard-backend \
--image=cloudforge-dashboard-backend:latest \
--replicas=0 \
-n cloudforge --dry-run=client -o yaml | kubectl apply -f -
fi
# 方法2:SSH 到 K3S node 直接 import (最快)
# 你需要設置 SSH key 到 secrets
# ssh user@k3s-node "sudo ctr -n k8s.io images import -" < /tmp/images/backend.tar
echo "✅ Image ready to load"
- name: Import Image via Job
run: |
# 用一個臨時 pod 來 import image
kubectl apply -f - <<EOJOB
/usr/local/bin/kubectl apply -f - <<EOJOB
apiVersion: batch/v1
kind: Job
metadata:
name: import-backend-${GITHUB_SHA:0:8}
name: kaniko-build-${GITHUB_SHA:0:8}
namespace: cloudforge
spec:
ttlSecondsAfterFinished: 600
ttlSecondsAfterFinished: 3600
template:
spec:
serviceAccountName: kaniko-builder
containers:
- name: importer
image: alpine:latest
command:
- sh
- -c
- |
echo "Image import completed (mock)"
# 實際上要用 ctr 或 crictl import
- name: kaniko
image: gcr.io/kaniko-project/executor:v1.20.0
args:
- "--dockerfile=Dockerfile"
- "--context=git://gitea-http.gitea.svc.cluster.local:3000/cloudforge-dev/dashboard-backend.git#main"
- "--destination=192.168.1.100:30500/cloudforge-dashboard-backend:${GITHUB_SHA:0:8}"
- "--destination=192.168.1.100:30500/cloudforge-dashboard-backend:latest"
- "--insecure"
- "--skip-tls-verify"
restartPolicy: Never
EOJOB
kubectl wait --for=condition=complete --timeout=300s \
job/import-backend-${GITHUB_SHA:0:8} -n cloudforge
/usr/local/bin/kubectl wait --for=condition=complete --timeout=1200s \
job/kaniko-build-${GITHUB_SHA:0:8} -n cloudforge
- name: Update Deployment
- name: Deploy
run: |
kubectl set image deployment/dashboard-backend \
backend=cloudforge-dashboard-backend:latest \
-n cloudforge \
--record || \
kubectl create deployment dashboard-backend \
--image=cloudforge-dashboard-backend:latest \
/usr/local/bin/kubectl set image deployment/dashboard-backend \
backend=192.168.1.100:30500/cloudforge-dashboard-backend:${GITHUB_SHA:0:8} \
-n cloudforge || \
/usr/local/bin/kubectl create deployment dashboard-backend \
--image=192.168.1.100:30500/cloudforge-dashboard-backend:${GITHUB_SHA:0:8} \
-n cloudforge
- name: Wait for Rollout
run: |
kubectl rollout status deployment/dashboard-backend \
-n cloudforge \
--timeout=600s
/usr/local/bin/kubectl rollout restart deployment/dashboard-backend -n cloudforge
# Job 3: Verify
verify:
runs-on: ubuntu-latest
needs: deploy
timeout-minutes: 5
steps:
- name: Restore kubeconfig
- name: Wait for deployment
run: |
mkdir -p ~/.kube
echo "$KUBECONFIG_DATA" | base64 -d > ~/.kube/config
chmod 600 ~/.kube/config
env:
KUBECONFIG_DATA: ${{ secrets.KUBECONFIG }}
/usr/local/bin/kubectl rollout status deployment/dashboard-backend \
-n cloudforge --timeout=300s
- name: Install kubectl
- name: Verify
run: |
curl -LO "https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl"
chmod +x kubectl
sudo mv kubectl /usr/local/bin/kubectl
- name: Verify Deployment
run: |
kubectl get pods -n cloudforge
kubectl get deployment dashboard-backend -n cloudforge
kubectl logs deployment/dashboard-backend -n cloudforge --tail=20
- name: Cleanup
run: |
kubectl delete job import-backend-${GITHUB_SHA:0:8} -n cloudforge || true
/usr/local/bin/kubectl get pods -n cloudforge
/usr/local/bin/kubectl logs deployment/dashboard-backend -n cloudforge --tail=20

View File

@@ -0,0 +1,33 @@
# kaniko-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: kaniko-builder
namespace: cloudforge
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kaniko-builder
namespace: cloudforge
rules:
- apiGroups: [""]
resources: ["pods", "pods/log"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kaniko-builder
namespace: cloudforge
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kaniko-builder
subjects:
- kind: ServiceAccount
name: kaniko-builder
namespace: cloudforge