Skip to content

Commit 81493d2

Browse files
Remove ROCm skip conditions for transformers backend tests
Signed-off-by: badaoui <[email protected]>
1 parent 014ece9 commit 81493d2

File tree

1 file changed

+0
-12
lines changed

1 file changed

+0
-12
lines changed

tests/models/test_transformers.py

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,6 @@
66

77
import pytest
88

9-
from vllm.platforms import current_platform
10-
119
from ..conftest import HfRunner, VllmRunner
1210
from ..utils import multi_gpu_test, prep_prompts
1311
from .registry import HF_EXAMPLE_MODELS
@@ -59,10 +57,6 @@ def check_implementation(
5957
)
6058

6159

62-
@pytest.mark.skipif(
63-
current_platform.is_rocm(),
64-
reason="Llama-3.2-1B-Instruct, Llama-3.2-1B produce memory access fault.",
65-
)
6660
@pytest.mark.parametrize(
6761
"model,model_impl",
6862
[
@@ -147,12 +141,6 @@ def test_quantization(
147141
max_tokens: int,
148142
num_logprobs: int,
149143
) -> None:
150-
if (
151-
current_platform.is_rocm()
152-
and quantization_kwargs.get("quantization", "") == "bitsandbytes"
153-
):
154-
pytest.skip("bitsandbytes quantization is currently not supported in rocm.")
155-
156144
with vllm_runner(
157145
model,
158146
model_impl="auto",

0 commit comments

Comments (0)