set -ex

# Allow binding the test container / OMP threads to different cores;
# callers may override, otherwise default to the first 17 logical CPUs.
CORE_RANGE=${CORE_RANGE:-"0-16"}
OMP_CORE_RANGE=${OMP_CORE_RANGE:-"0-16"}

# Cap parallel compile jobs so the CPU build doesn't oversubscribe the host.
export CMAKE_BUILD_PARALLEL_LEVEL=16
# Setup cleanup: force-remove the test container. Registered on EXIT so a
# failed run never leaves a stale container behind, and invoked once up front
# to clear leftovers from a previous run. `|| true` keeps removal best-effort.
remove_docker_container() {
  set -e
  docker rm -f cpu-test || true
}
trap remove_docker_container EXIT
remove_docker_container
2120
# Try building the docker image
docker build --tag cpu-test --target vllm-test -f docker/Dockerfile.cpu .

# Run the image detached, pinned to $CORE_RANGE; --shm-size=4g is needed for
# shared-memory heavy workloads, and the HF cache is mounted to avoid
# re-downloading models on every CI run.
docker run -itd \
  --cpuset-cpus="$CORE_RANGE" \
  --entrypoint /bin/bash \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  -e HF_TOKEN \
  --env VLLM_CPU_KVCACHE_SPACE=16 \
  --env VLLM_CPU_CI_ENV=1 \
  -e E2E_OMP_THREADS="$OMP_CORE_RANGE" \
  --shm-size=4g \
  --name cpu-test \
  cpu-test
2726
# Run the CPU test suite inside the already-started `cpu-test` container:
# sanity-check the environment, then offline inference, kernel tests, and a
# basic online-serving smoke test with a benchmark pass.
function cpu_tests() {
  set -e

  # Environment sanity check: list installed packages.
  docker exec cpu-test bash -c "
    set -e
    pip list"

  # offline inference
  docker exec cpu-test bash -c "
    set -e
    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m"

  # Run kernel tests
  docker exec cpu-test bash -c "
    set -e
    pytest -x -v -s tests/kernels/test_onednn.py
    pytest -x -v -s tests/kernels/attention/test_cpu_attn.py"

  # basic online serving: start the server in the background, wait (up to
  # 10 min) until /v1/models responds, run a small benchmark, then TERM it.
  # Single quotes are deliberate: $E2E_OMP_THREADS and $server_pid must be
  # expanded inside the container, not by the host shell.
  docker exec cpu-test bash -c '
    set -e
    VLLM_CPU_OMP_THREADS_BIND=$E2E_OMP_THREADS vllm serve Qwen/Qwen3-0.6B --max-model-len 2048 &
    server_pid=$!
    timeout 600 bash -c "until curl localhost:8000/v1/models; do sleep 1; done" || exit 1
    vllm bench serve \
      --backend vllm \
      --dataset-name random \
      --model Qwen/Qwen3-0.6B \
      --num-prompts 20 \
      --endpoint /v1/completions
    kill -s SIGTERM $server_pid &'
}
6159
# All of CPU tests are expected to be finished less than 40 mins.
# Export the function so the `bash -c` child spawned by `timeout` can see it;
# the 2h ceiling is a hard kill switch well above the expected runtime.
export -f cpu_tests
timeout 2h bash -c cpu_tests
# NOTE: removed non-script web-page residue ("0 commit comments") from paste.