Skip to content

Commit 7634ffb

Browse files
authored
[GCU] Add CI (PaddlePaddle#3006)
1 parent 6ce3a8a commit 7634ffb

3 files changed

Lines changed: 207 additions & 0 deletions

File tree

.github/workflows/ci_gcu.yml

Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
name: CI_GCU

on:
  pull_request:
    branches:
      - develop
      - 'release/*'
  workflow_dispatch:

concurrency:
  # Fall back to run_id for workflow_dispatch runs: they carry no PR number,
  # so without the fallback every manual run would share one empty group
  # and cancel the others.
  group: ${{ github.event.pull_request.number || github.run_id }}-gcu-ci
  cancel-in-progress: true

jobs:
  CI_GCU:
    runs-on: [self-hosted, GCU-S60-8Card]
    steps:
      - name: Print current runner name
        run: |
          echo "Current runner name: ${{ runner.name }}"

      # Manual checkout: the self-hosted runner cleans and re-clones the repo
      # itself (inside a container for the cleanup) instead of actions/checkout.
      - name: Code Checkout
        env:
          docker_image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/device/paddle-gcu:topsrider3.5.102-ubuntu20-x86_64-gcc84
        run: |
          REPO="https://github.com/${{ github.repository }}.git"
          FULL_REPO="${{ github.repository }}"
          REPO_NAME="${FULL_REPO##*/}"
          BASE_BRANCH="${{ github.base_ref }}"
          # Clean the repository directory before starting (run in a container
          # so root-owned files left by previous containerized runs are removable).
          docker run --rm --net=host -v "$(pwd)":/workspace -w /workspace \
            -e "REPO_NAME=${REPO_NAME}" \
            -e "BASE_BRANCH=${BASE_BRANCH}" \
            ${docker_image} /bin/bash -c '
            if [ -d ${REPO_NAME} ]; then
              echo "Directory ${REPO_NAME} exists, removing it..."
              rm -rf ${REPO_NAME}
            fi
            '
          git config --global user.name "FastDeployCI"
          git config --global user.email "fastdeploy_ci@example.com"
          git clone ${REPO} ${REPO_NAME} -b ${BASE_BRANCH}
          # Enter the directory the clone above actually created; do not
          # hard-code "FastDeploy" so a repository rename cannot break this step.
          cd ${REPO_NAME}
          if [ "${{ github.event_name }}" = "pull_request" ]; then
            git fetch origin pull/${{ github.event.pull_request.number }}/head:pr/${{ github.event.pull_request.number }}
            git merge pr/${{ github.event.pull_request.number }}
            git log -n 3 --oneline
          else
            git checkout ${{ github.sha }}
            git log -n 3 --oneline
          fi

      - name: Run CI unittest
        env:
          docker_image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/device/paddle-gcu:topsrider3.5.102-ubuntu20-x86_64-gcc84
        run: |
          runner_name="${{ runner.name }}"
          last_char="${runner_name: -1}"

          # Runners are named with a trailing card index 0-3; derive distinct
          # port ranges per card so concurrent jobs on one host do not collide.
          if [[ "$last_char" =~ [0-3] ]]; then
            gcu_id="$last_char"
          else
            gcu_id="0"
          fi
          FD_API_PORT=$((9180 + gcu_id * 100))
          FD_ENGINE_QUEUE_PORT=$((9150 + gcu_id * 100))
          FD_METRICS_PORT=$((9170 + gcu_id * 100))

          PARENT_DIR=$(dirname "$WORKSPACE")
          echo "PARENT_DIR:$PARENT_DIR"
          echo "Install drivers..."
          cd /work/deps
          bash TopsRider_i3x_*_deb_amd64.run --driver --no-auto-load -y
          cd -
          # NOTE: no "-it" here — CI steps have no TTY attached, and
          # allocating one makes "docker run" fail with
          # "the input device is not a TTY".
          docker run --rm --network=host --ipc=host --privileged \
            -v "$(pwd)":/workspace -w /workspace \
            -v "/home:/home" \
            -v "/work:/work" \
            -e "MODEL_PATH=/work/models" \
            -e "http_proxy=$(git config --global --get http.proxy)" \
            -e "https_proxy=$(git config --global --get https.proxy)" \
            -e "FD_API_PORT=${FD_API_PORT}" \
            -e "FD_ENGINE_QUEUE_PORT=${FD_ENGINE_QUEUE_PORT}" \
            -e "FD_METRICS_PORT=${FD_METRICS_PORT}" \
            ${docker_image} /bin/bash -c "
            git config --global --add safe.directory /workspace/FastDeploy
            cd FastDeploy
            bash scripts/run_ci_gcu.sh
            "

scripts/run_ci_gcu.sh

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
#!/bin/bash
# CI entry point for the GCU pipeline: installs dependencies, builds the
# FastDeploy wheel, launches the OpenAI-compatible API server on port 8188
# and runs a smoke inference test against it.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
echo "$DIR"

# Kill any leftover server processes from a previous run first.
# xargs -r: do not invoke bare "kill -9" when the pid list is empty.
ps -efww | grep -E 'api_server' | grep -v grep | awk '{print $2}' | xargs -r kill -9 || true
ps -efww | grep -E '8188' | grep -v grep | awk '{print $2}' | xargs -r kill -9 || true
lsof -t -i :8188 | xargs -r kill -9 || true

# MODEL_PATH is injected by the CI docker invocation (ci_gcu.yml).
export model_path=${MODEL_PATH}/paddle/ERNIE-4.5-21B-A3B-Paddle

echo "pip install requirements"
python -m pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
echo "uninstall org"
# Remove any preinstalled Paddle packages before pinning the CPU build.
python -m pip uninstall paddlepaddle -y
python -m pip uninstall paddle-custom-gcu -y
python -m pip install paddlepaddle==3.1.0a0 -i https://www.paddlepaddle.org.cn/packages/stable/cpu/
echo "build whl"
bash build.sh 1 || exit 1

# Drop proxies so the local server and health probe are reached directly.
unset http_proxy
unset https_proxy
unset no_proxy

# Start the server.
rm -rf log/*
rm -f core*
# pkill -9 python  # deliberately NOT run inside the pipeline
# Drain stale System V message queues left behind by a previous engine run.
ipcrm --all=msg
python -m fastdeploy.entrypoints.openai.api_server \
       --model "${model_path}" \
       --port 8188 \
       --metrics-port 8200 \
       --tensor-parallel-size 4 \
       --num-gpu-blocks-override 4096 \
       --max-model-len 32768 \
       --max-num-seqs 8 \
       --quantization wint4 > server.log 2>&1 &

sleep 60
# Health probe: poll /health until the server answers or the timeout expires.
TIMEOUT=$((5 * 60))  # maximum wait, seconds
INTERVAL=10          # poll interval, seconds
ENDPOINT="http://0.0.0.0:8188/health"
START_TIME=$(date +%s)  # start timestamp
echo "开始服务健康检查,最长等待时间:${TIMEOUT}"
while true; do
    # Elapsed time so far.
    CURRENT_TIME=$(date +%s)
    ELAPSED=$((CURRENT_TIME - START_TIME))

    # Timed out: dump the server/worker logs and fail the job.
    if [ "$ELAPSED" -ge "$TIMEOUT" ]; then
        echo -e "\n服务启动超时:经过 $((TIMEOUT/60)) 分钟服务仍未启动!"
        cat server.log
        cat log/workerlog.0
        exit 1
    fi

    # "|| true": curl failing (connection refused) must not abort the loop.
    HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -m 2 "$ENDPOINT" || true)

    if [ "$HTTP_CODE" = "200" ]; then
        echo -e "\n服务启动成功!耗时 ${ELAPSED}"
        break
    else
        sleep "$INTERVAL"
    fi
done

cat server.log

# Run the serving inference smoke test and capture its status.
python test/ci_use/GCU/run_ernie.py
exit_code=$?
echo exit_code is ${exit_code}

# Always tear the server down, regardless of the test outcome.
ps -efww | grep -E 'api_server' | grep -v grep | awk '{print $2}' | xargs -r kill -9 || true
ps -efww | grep -E '8188' | grep -v grep | awk '{print $2}' | xargs -r kill -9 || true
lsof -t -i :8188 | xargs -r kill -9 || true

if [ "${exit_code}" -ne 0 ]; then
    echo "log/workerlog.0"
    cat log/workerlog.0
    exit 1
fi

test/ci_use/GCU/run_ernie.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Smoke test: send one chat request to the local FastDeploy server."""

import openai

# Endpoint of the serving process; the port is the one the serving
# script configures.
ip = "0.0.0.0"
service_http_port = "8188"  # configured by the serving script
client = openai.Client(base_url=f"http://{ip}:{service_http_port}/v1", api_key="EMPTY_API_KEY")

# Single-turn conversation payload.
chat_messages = [
    {"role": "user", "content": "The largest ocean is"},
]

# Non-streaming chat completion.
response = client.chat.completions.create(
    model="default",
    messages=chat_messages,
    temperature=1,
    top_p=0,
    max_tokens=64,
    stream=False,
)
print(response)

0 commit comments

Comments
 (0)