Trigger build
Some checks failed: Build and Deploy Backend (Kaniko) / build-and-deploy (push) has been cancelled

2025-12-09 11:26:56 +08:00
parent 4e555680af
commit 5866e05ec0
2 changed files with 65 additions and 116 deletions

@@ -1,4 +1,4 @@
-name: Build and Deploy Backend (Podman)
+name: Build and Deploy Backend (Kaniko)
 on:
   push:
     branches:
@@ -6,46 +6,14 @@ on:
       - main
 jobs:
-  # Job 1: Build with Podman
-  build:
+  build-and-deploy:
     runs-on: ubuntu-latest
-    timeout-minutes: 20
+    timeout-minutes: 30
     steps:
       - name: Checkout
         uses: actions/checkout@v3
-      - name: Install Podman
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y podman
-          podman --version
-      - name: Build Image with Podman
-        run: |
-          podman build -t cloudforge-dashboard-backend:${GITHUB_SHA:0:8} .
-          podman tag cloudforge-dashboard-backend:${GITHUB_SHA:0:8} cloudforge-dashboard-backend:latest
-      - name: Save Image to TAR
-        run: |
-          mkdir -p /tmp/images
-          podman save cloudforge-dashboard-backend:latest -o /tmp/images/backend.tar
-          ls -lh /tmp/images/backend.tar
-      - name: Upload Image Artifact
-        uses: actions/upload-artifact@v3
-        with:
-          name: backend-image
-          path: /tmp/images/backend.tar
-          retention-days: 1
-  # Job 2: Load to K3S and Deploy
-  deploy:
-    runs-on: ubuntu-latest
-    needs: build
-    timeout-minutes: 15
-    steps:
       - name: Restore kubeconfig
         run: |
           mkdir -p ~/.kube
@@ -60,104 +28,52 @@ jobs:
           chmod +x kubectl
           sudo mv kubectl /usr/local/bin/kubectl
-      - name: Download Image Artifact
-        uses: actions/download-artifact@v3
-        with:
-          name: backend-image
-          path: /tmp/images
-      - name: Load Image to K3S
+      - name: Build with Kaniko
         run: |
-          # Method 1: upload to the K3S node directly with kubectl cp;
-          # first find a running pod to act as a jump host
-          POD=$(kubectl get pods -n cloudforge -l app=dashboard-backend -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
-          if [ -z "$POD" ]; then
-            echo "⚠️ No existing pod found, will create deployment first"
-            # create the deployment first (if it does not exist yet)
-            kubectl create deployment dashboard-backend \
-              --image=cloudforge-dashboard-backend:latest \
-              --replicas=0 \
-              -n cloudforge --dry-run=client -o yaml | kubectl apply -f -
-          fi
-          # Method 2: SSH to the K3S node and import directly (fastest);
-          # this requires an SSH key configured in the secrets
-          # ssh user@k3s-node "sudo ctr -n k8s.io images import -" < /tmp/images/backend.tar
-          echo "✅ Image ready to load"
-      - name: Import Image via Job
-        run: |
-          # use a temporary pod to import the image
-          kubectl apply -f - <<EOJOB
+          /usr/local/bin/kubectl apply -f - <<EOJOB
           apiVersion: batch/v1
           kind: Job
           metadata:
-            name: import-backend-${GITHUB_SHA:0:8}
+            name: kaniko-build-${GITHUB_SHA:0:8}
             namespace: cloudforge
           spec:
-            ttlSecondsAfterFinished: 600
+            ttlSecondsAfterFinished: 3600
             template:
               spec:
+                serviceAccountName: kaniko-builder
                 containers:
-                - name: importer
-                  image: alpine:latest
-                  command:
-                  - sh
-                  - -c
-                  - |
-                    echo "Image import completed (mock)"
-                    # a real import would use ctr or crictl
+                - name: kaniko
+                  image: gcr.io/kaniko-project/executor:v1.20.0
+                  args:
+                  - "--dockerfile=Dockerfile"
+                  - "--context=git://gitea-http.gitea.svc.cluster.local:3000/cloudforge-dev/dashboard-backend.git#main"
+                  - "--destination=192.168.1.100:30500/cloudforge-dashboard-backend:${GITHUB_SHA:0:8}"
+                  - "--destination=192.168.1.100:30500/cloudforge-dashboard-backend:latest"
+                  - "--insecure"
+                  - "--skip-tls-verify"
                 restartPolicy: Never
           EOJOB
-          kubectl wait --for=condition=complete --timeout=300s \
-            job/import-backend-${GITHUB_SHA:0:8} -n cloudforge
-      - name: Update Deployment
+          /usr/local/bin/kubectl wait --for=condition=complete --timeout=1200s \
+            job/kaniko-build-${GITHUB_SHA:0:8} -n cloudforge
+      - name: Deploy
         run: |
-          kubectl set image deployment/dashboard-backend \
-            backend=cloudforge-dashboard-backend:latest \
-            -n cloudforge \
-            --record || \
-          kubectl create deployment dashboard-backend \
-            --image=cloudforge-dashboard-backend:latest \
+          /usr/local/bin/kubectl set image deployment/dashboard-backend \
+            backend=192.168.1.100:30500/cloudforge-dashboard-backend:${GITHUB_SHA:0:8} \
+            -n cloudforge || \
+          /usr/local/bin/kubectl create deployment dashboard-backend \
+            --image=192.168.1.100:30500/cloudforge-dashboard-backend:${GITHUB_SHA:0:8} \
             -n cloudforge
-      - name: Wait for Rollout
-        run: |
-          kubectl rollout status deployment/dashboard-backend \
-            -n cloudforge \
-            --timeout=600s
-  # Job 3: Verify
-  verify:
-    runs-on: ubuntu-latest
-    needs: deploy
-    timeout-minutes: 5
-    steps:
-      - name: Restore kubeconfig
-        run: |
-          mkdir -p ~/.kube
-          echo "$KUBECONFIG_DATA" | base64 -d > ~/.kube/config
-          chmod 600 ~/.kube/config
-        env:
-          KUBECONFIG_DATA: ${{ secrets.KUBECONFIG }}
-      - name: Install kubectl
-        run: |
-          curl -LO "https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl"
-          chmod +x kubectl
-          sudo mv kubectl /usr/local/bin/kubectl
-      - name: Verify Deployment
-        run: |
-          kubectl get pods -n cloudforge
-          kubectl get deployment dashboard-backend -n cloudforge
-          kubectl logs deployment/dashboard-backend -n cloudforge --tail=20
-      - name: Cleanup
-        run: |
-          kubectl delete job import-backend-${GITHUB_SHA:0:8} -n cloudforge || true
+          /usr/local/bin/kubectl rollout restart deployment/dashboard-backend -n cloudforge
+      - name: Wait for deployment
+        run: |
+          /usr/local/bin/kubectl rollout status deployment/dashboard-backend \
+            -n cloudforge --timeout=300s
+      - name: Verify
+        run: |
+          /usr/local/bin/kubectl get pods -n cloudforge
+          /usr/local/bin/kubectl logs deployment/dashboard-backend -n cloudforge --tail=20
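
If the cancelled run above left a Kaniko Job behind, it can be inspected by hand and the push confirmed against the registry directly. A minimal debugging sketch, not part of the commit: the abc12345 suffix stands in for the real ${GITHUB_SHA:0:8} value, and the two curl paths are the standard Docker Registry HTTP API v2 endpoints, assuming the insecure registry at 192.168.1.100:30500 that the workflow pushes to.

# Inspect the in-cluster build job and its logs (job-name suffix is hypothetical)
kubectl get jobs -n cloudforge
kubectl logs job/kaniko-build-abc12345 -n cloudforge

# Confirm the pushed tags over plain HTTP, matching the executor's
# --insecure/--skip-tls-verify flags
curl http://192.168.1.100:30500/v2/_catalog
curl http://192.168.1.100:30500/v2/cloudforge-dashboard-backend/tags/list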

@@ -0,0 +1,33 @@
# kaniko-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: kaniko-builder
namespace: cloudforge
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kaniko-builder
namespace: cloudforge
rules:
- apiGroups: [""]
resources: ["pods", "pods/log"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kaniko-builder
namespace: cloudforge
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kaniko-builder
subjects:
- kind: ServiceAccount
name: kaniko-builder
namespace: cloudforge
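
Not part of the manifest above, but a quick way to confirm the binding behaves as intended: kubectl's impersonation flag can exercise the new ServiceAccount without mounting its token. A minimal sketch, assuming an admin kubeconfig for the same cluster:

# Apply the RBAC, then probe it as the kaniko-builder ServiceAccount
kubectl apply -f kaniko-rbac.yaml
kubectl auth can-i list pods -n cloudforge \
  --as=system:serviceaccount:cloudforge:kaniko-builder    # expected: yes
kubectl auth can-i get pods/log -n cloudforge \
  --as=system:serviceaccount:cloudforge:kaniko-builder    # expected: yes
kubectl auth can-i create pods -n cloudforge \
  --as=system:serviceaccount:cloudforge:kaniko-builder    # expected: no (the Role is read-only)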