12 changes: 6 additions & 6 deletions src/art/dev/model.py
@@ -42,12 +42,12 @@ def get_model_config(
         disable_log_requests=True,
         # Multi-step processing is not supported for the Xformers attention backend
         # which is the fallback for devices with compute capability < 8.0
-        num_scheduler_steps=(
-            16
-            if config.get("torchtune_args") is None
-            and torch.cuda.get_device_capability()[0] >= 8
-            else 1
-        ),
+        # num_scheduler_steps=(
+        #     16
+        #     if config.get("torchtune_args") is None
+        #     and torch.cuda.get_device_capability()[0] >= 8
+        #     else 1
+        # ),
         enable_sleep_mode=enable_sleep_mode,
         generation_config="vllm",
     )
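This hunk comments out the num_scheduler_steps override, likely because multi-step scheduling is a V0-engine knob that no longer applies once the PR opts into the V1 engine in state.py below. For reference, the gating being disabled can be read as the following standalone sketch; pick_num_scheduler_steps is a hypothetical helper for illustration, not a function in the repo:

import torch

def pick_num_scheduler_steps(config: dict) -> int:
    # Multi-step scheduling requires compute capability >= 8.0 (Ampere or
    # newer); older devices fall back to the Xformers attention backend,
    # which does not support it. torchtune-managed models also opt out.
    if (
        config.get("torchtune_args") is None
        and torch.cuda.get_device_capability()[0] >= 8
    ):
        return 16  # batch 16 scheduler steps per engine iteration
    return 1  # single-step scheduling otherwise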
13 changes: 10 additions & 3 deletions src/art/unsloth/state.py
@@ -38,6 +38,7 @@ class ModelState:
     """
 
     def __init__(self, config: InternalModelConfig) -> None:
+        from unsloth_zoo.vllm_rlhf_utils import ColocateWorkerExtension
         from vllm.engine import async_llm_engine
 
         # Patch MultiStepModelRunner for Unsloth compatibility
@@ -49,7 +50,7 @@ def __init__(self, config: InternalModelConfig) -> None:
         # Set effectively unlimited timeout to support engine pausing & resumption
         async_llm_engine.ENGINE_ITERATION_TIMEOUT_S = 2**31 - 1
-        # Sticking with V0 engine for now
-        os.environ["VLLM_USE_V1"] = "0"
+        # Switch to the V1 engine
+        os.environ["VLLM_USE_V1"] = "1"
         # We can't use expandable segments with sleep mode
         enable_sleep_mode = config.get("engine_args", {}).get(
             "enable_sleep_mode", False
@@ -69,7 +70,13 @@ def _from_engine_args(
             engine_args: AsyncEngineArgs, *args: Any, **kwargs: Any
         ) -> AsyncLLMEngine:
             return from_engine_args(
-                replace(engine_args, **config.get("engine_args", {})), *args, **kwargs
+                replace(
+                    engine_args,
+                    **config.get("engine_args", {}),
+                    worker_extension_cls=f"{ColocateWorkerExtension.__module__}.{ColocateWorkerExtension.__qualname__}",
+                ),
+                *args,
+                **kwargs,
             )
 
         AsyncLLMEngine.from_engine_args = _from_engine_args
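worker_extension_cls takes a fully qualified dotted path as a string; vLLM imports that path in each worker process and mixes the class into the worker, which is how unsloth_zoo's ColocateWorkerExtension hooks into the colocated workers. Deriving the path from __module__ and __qualname__ rather than hard-coding it keeps the string correct if the class is ever moved or renamed. A minimal sketch of the construction, with SomeExtension standing in for the real class:

class SomeExtension:
    # Stand-in for a worker extension class such as ColocateWorkerExtension.
    pass

dotted_path = f"{SomeExtension.__module__}.{SomeExtension.__qualname__}"
print(dotted_path)  # e.g. "__main__.SomeExtension" when run as a script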