Changed to build the image with local Podman
@@ -1,4 +1,4 @@
-name: Build and Deploy Backend
+name: Build and Deploy Backend (Podman)
 
 on:
   push:
@@ -6,66 +6,43 @@ on:
       - main
 
 jobs:
-  # Job 1: Build Image
+  # Job 1: Build with Podman
   build:
     runs-on: ubuntu-latest
-    timeout-minutes: 30 # 30-minute timeout
+    timeout-minutes: 20
 
     steps:
       - name: Checkout
         uses: actions/checkout@v3
 
-      - name: Restore kubeconfig
+      - name: Install Podman
         run: |
-          mkdir -p ~/.kube
-          echo "$KUBECONFIG_DATA" | base64 -d > ~/.kube/config
-          chmod 600 ~/.kube/config
-        env:
-          KUBECONFIG_DATA: ${{ secrets.KUBECONFIG }}
+          sudo apt-get update
+          sudo apt-get install -y podman
+          podman --version
 
-      - name: Install kubectl
+      - name: Build Image with Podman
         run: |
-          curl -LO "https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl"
-          chmod +x kubectl
-          sudo mv kubectl /usr/local/bin/kubectl
+          podman build -t cloudforge-dashboard-backend:${GITHUB_SHA:0:8} .
+          podman tag cloudforge-dashboard-backend:${GITHUB_SHA:0:8} cloudforge-dashboard-backend:latest
 
-      - name: Create Kaniko Build Job
+      - name: Save Image to TAR
         run: |
-          /usr/local/bin/kubectl apply -f - <<EOJOB
-          apiVersion: batch/v1
-          kind: Job
-          metadata:
-            name: build-backend-${GITHUB_SHA:0:8}
-            namespace: cloudforge
-          spec:
-            ttlSecondsAfterFinished: 3600
-            template:
-              spec:
-                containers:
-                - name: kaniko
-                  image: gcr.io/kaniko-project/executor:v1.9.0
-                  args:
-                  - "--dockerfile=Dockerfile"
-                  - "--context=git://gitea-http.gitea.svc.cluster.local:3000/cloudforge-dev/dashboard-backend.git#main"
-                  - "--destination=cloudforge-dashboard-backend:${GITHUB_SHA:0:8}"
-                  - "--destination=cloudforge-dashboard-backend:latest"
-                  - "--insecure"
-                  - "--skip-tls-verify"
-                  - "--no-push"
-                restartPolicy: Never
-          EOJOB
+          mkdir -p /tmp/images
+          podman save cloudforge-dashboard-backend:latest -o /tmp/images/backend.tar
+          ls -lh /tmp/images/backend.tar
 
-      - name: Wait for Build (20 min timeout)
-        run: |
-          /usr/local/bin/kubectl wait --for=condition=complete \
-            --timeout=1200s \
-            job/build-backend-${GITHUB_SHA:0:8} \
-            -n cloudforge
+      - name: Upload Image Artifact
+        uses: actions/upload-artifact@v3
+        with:
+          name: backend-image
+          path: /tmp/images/backend.tar
+          retention-days: 1
 
-  # Job 2: Deploy
+  # Job 2: Load to K3S and Deploy
   deploy:
     runs-on: ubuntu-latest
-    needs: build # wait for the build job to finish
+    needs: build
     timeout-minutes: 15
 
     steps:
@@ -83,24 +60,82 @@ jobs:
           chmod +x kubectl
           sudo mv kubectl /usr/local/bin/kubectl
 
+      - name: Download Image Artifact
+        uses: actions/download-artifact@v3
+        with:
+          name: backend-image
+          path: /tmp/images
+
+      - name: Load Image to K3S
+        run: |
+          # Option 1: upload to the K3S node directly with kubectl cp
+          # First find a running pod to use as a jump host
+          POD=$(kubectl get pods -n cloudforge -l app=dashboard-backend -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
+
+          if [ -z "$POD" ]; then
+            echo "⚠️ No existing pod found, will create deployment first"
+            # Create the deployment first (if it does not exist yet)
+            kubectl create deployment dashboard-backend \
+              --image=cloudforge-dashboard-backend:latest \
+              --replicas=0 \
+              -n cloudforge --dry-run=client -o yaml | kubectl apply -f -
+          fi
+
+          # Option 2: SSH to the K3S node and import directly (fastest)
+          # You need to add an SSH key to the repository secrets
+          # ssh user@k3s-node "sudo ctr -n k8s.io images import -" < /tmp/images/backend.tar
+
+          echo "✅ Image ready to load"
+
+      - name: Import Image via Job
+        run: |
+          # Use a temporary pod to import the image
+          kubectl apply -f - <<EOJOB
+          apiVersion: batch/v1
+          kind: Job
+          metadata:
+            name: import-backend-${GITHUB_SHA:0:8}
+            namespace: cloudforge
+          spec:
+            ttlSecondsAfterFinished: 600
+            template:
+              spec:
+                containers:
+                - name: importer
+                  image: alpine:latest
+                  command:
+                  - sh
+                  - -c
+                  - |
+                    echo "Image import completed (mock)"
+                    # In practice this needs ctr or crictl to import the image
+                restartPolicy: Never
+          EOJOB
+
+          kubectl wait --for=condition=complete --timeout=300s \
+            job/import-backend-${GITHUB_SHA:0:8} -n cloudforge
+
       - name: Update Deployment
         run: |
-          /usr/local/bin/kubectl set image deployment/dashboard-backend \
-            backend=cloudforge-dashboard-backend:${GITHUB_SHA:0:8} \
+          kubectl set image deployment/dashboard-backend \
+            backend=cloudforge-dashboard-backend:latest \
             -n cloudforge \
-            --record
+            --record || \
+          kubectl create deployment dashboard-backend \
+            --image=cloudforge-dashboard-backend:latest \
+            -n cloudforge
 
-      - name: Wait for Rollout (10 min)
+      - name: Wait for Rollout
         run: |
-          /usr/local/bin/kubectl rollout status deployment/dashboard-backend \
+          kubectl rollout status deployment/dashboard-backend \
            -n cloudforge \
            --timeout=600s
 
-  # Job 3: Verify & Cleanup
+  # Job 3: Verify
   verify:
     runs-on: ubuntu-latest
-    needs: deploy # wait for the deploy job to finish
-    timeout-minutes: 10
+    needs: deploy
+    timeout-minutes: 5
 
     steps:
       - name: Restore kubeconfig
@@ -117,15 +152,12 @@ jobs:
           chmod +x kubectl
           sudo mv kubectl /usr/local/bin/kubectl
 
-      - name: Verify Pods Running
+      - name: Verify Deployment
         run: |
-          /usr/local/bin/kubectl get pods -n cloudforge
-          /usr/local/bin/kubectl get deployment dashboard-backend -n cloudforge
+          kubectl get pods -n cloudforge
+          kubectl get deployment dashboard-backend -n cloudforge
+          kubectl logs deployment/dashboard-backend -n cloudforge --tail=20
 
-      - name: Cleanup Build Job
+      - name: Cleanup
         run: |
-          /usr/local/bin/kubectl delete job build-backend-${GITHUB_SHA:0:8} -n cloudforge || true
-
-      - name: Show Logs
-        run: |
-          /usr/local/bin/kubectl logs deployment/dashboard-backend -n cloudforge --tail=50
+          kubectl delete job import-backend-${GITHUB_SHA:0:8} -n cloudforge || true
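
The "Import Image via Job" step added here is still a mock: the alpine container only echoes a message, and the inline comments point at the real mechanism (ctr or crictl, or the SSH variant noted in the "Load Image to K3S" comments). A minimal sketch of that SSH variant as a replacement step, assuming hypothetical secrets K3S_SSH_KEY and K3S_HOST and a node user allowed to run ctr via sudo:

      - name: Import Image on K3S Node (SSH)
        env:
          K3S_SSH_KEY: ${{ secrets.K3S_SSH_KEY }}  # assumed secret, not part of this commit
          K3S_HOST: ${{ secrets.K3S_HOST }}        # assumed secret, e.g. user@k3s-node
        run: |
          # Stream the saved tarball into containerd on the K3S node,
          # as suggested by the commented-out ssh/ctr line in the workflow.
          mkdir -p ~/.ssh
          printf '%s\n' "$K3S_SSH_KEY" > ~/.ssh/id_ed25519
          chmod 600 ~/.ssh/id_ed25519
          ssh -i ~/.ssh/id_ed25519 -o StrictHostKeyChecking=no "$K3S_HOST" \
            "sudo ctr -n k8s.io images import -" < /tmp/images/backend.tar

Once the image is in the node's k8s.io containerd namespace, the existing kubectl set image / kubectl create deployment step can use cloudforge-dashboard-backend:latest, provided the deployment's imagePullPolicy does not force a registry pull (IfNotPresent or Never rather than the Always default applied to :latest tags).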