File tree Expand file tree Collapse file tree 4 files changed +12
-8
lines changed
Expand file tree Collapse file tree 4 files changed +12
-8
lines changed Original file line number Diff line number Diff line change 8888 uses : docker/build-push-action@v6
8989 with :
9090 platforms : linux/amd64,linux/arm64
91+ # use the current repo path as the build context, to ensure .git is included
92+ context : .
9193 # only trigger when tag, branch/main push
9294 push : ${{ github.event_name == 'push' && github.repository_owner == 'vllm-project' }}
9395 labels : ${{ steps.meta.outputs.labels }}
Original file line number Diff line number Diff line change 8888 uses : docker/build-push-action@v6
8989 with :
9090 platforms : linux/amd64,linux/arm64
91+ # use the current repo path as the build context, to ensure .git is included
92+ context : .
9193 # only trigger when tag, branch/main push
9294 push : ${{ github.event_name == 'push' && github.repository_owner == 'vllm-project' }}
9395 labels : ${{ steps.meta.outputs.labels }}
Original file line number Diff line number Diff line change @@ -31,16 +31,16 @@ RUN apt-get update -y && \
3131
3232WORKDIR /workspace
3333
34- COPY . /workspace/vllm-ascend/
34+ COPY . /vllm-workspace/vllm-ascend/
3535
3636RUN pip config set global.index-url ${PIP_INDEX_URL}
3737
3838# Install vLLM
3939ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
4040ARG VLLM_TAG=v0.8.5
41- RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /workspace/vllm
41+ RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
4242# In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it.
43- RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \
43+ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \
4444 python3 -m pip uninstall -y triton && \
4545 python3 -m pip cache purge
4646
@@ -49,7 +49,7 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /workspace/vllm/ --e
4949RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
5050 source /usr/local/Ascend/nnal/atb/set_env.sh && \
5151 export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \
52- python3 -m pip install -v -e /workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \
52+ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \
5353 python3 -m pip cache purge
5454
5555# Install modelscope (for fast download) and ray (for multinode)
Original file line number Diff line number Diff line change @@ -30,23 +30,23 @@ RUN pip config set global.index-url ${PIP_INDEX_URL}
3030
3131WORKDIR /workspace
3232
33- COPY . /workspace/vllm-ascend/
33+ COPY . /vllm-workspace/vllm-ascend/
3434
3535# Install vLLM
3636ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
3737ARG VLLM_TAG=v0.8.5
3838
39- RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /workspace/vllm
39+ RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
4040# In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it.
41- RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \
41+ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \
4242 python3 -m pip uninstall -y triton && \
4343 python3 -m pip cache purge
4444
4545# Install vllm-ascend
4646RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
4747 source /usr/local/Ascend/nnal/atb/set_env.sh && \
4848 export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \
49- python3 -m pip install -v -e /workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \
49+ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \
5050 python3 -m pip cache purge
5151
5252# Install modelscope (for fast download) and ray (for multinode)
You can’t perform that action at this time.
0 commit comments