
Commit 4982529

[Utils] Deprecate unused utils (#2097)
## Purpose ##

* These utils haven't been used in almost two years. Let's deprecate them before the next release.

## After Release ##

* #2098

---------

Signed-off-by: Kyle Sayers <[email protected]>
1 parent 91d8d9d commit 4982529

File tree

8 files changed: +42 -63 lines changed

src/llmcompressor/modifiers/quantization/calibration.py

Lines changed: 5 additions & 2 deletions
@@ -8,12 +8,15 @@
     QuantizationStrategy,
 )
 from compressed_tensors.quantization.lifecycle.forward import forward_quantize
-from compressed_tensors.utils import align_module_device, update_offload_parameter
+from compressed_tensors.utils import (
+    align_module_device,
+    getattr_chain,
+    update_offload_parameter,
+)
 from loguru import logger
 from torch.nn import Module
 
 from llmcompressor.observers import Observer
-from llmcompressor.utils.helpers import getattr_chain
 
 __all__ = [
     "initialize_observer",

src/llmcompressor/pipelines/independent/pipeline.py

Lines changed: 1 addition & 1 deletion
@@ -1,12 +1,12 @@
 from typing import TYPE_CHECKING
 
 import torch
+from compressed_tensors.utils import patch_attr
 from loguru import logger
 from torch.utils.data.dataloader import DataLoader
 
 from llmcompressor.core import active_session
 from llmcompressor.pipelines.registry import CalibrationPipeline
-from llmcompressor.utils.helpers import patch_attr
 
 if TYPE_CHECKING:
     from llmcompressor.args.dataset_arguments import DatasetArguments

src/llmcompressor/pipelines/sequential/ast_helpers.py

Lines changed: 1 addition & 1 deletion
@@ -8,9 +8,9 @@
 from typing import List
 
 import torch
+from compressed_tensors.utils import patch_attr
 
 from llmcompressor.pipelines.sequential.ast_utils.auto_wrapper import AutoWrapper
-from llmcompressor.utils import patch_attr
 
 __all__ = ["autowrap_forwards", "append_autowrap_source_on_fail"]
 

src/llmcompressor/pipelines/sequential/ast_utils/name_analyzer.py

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 import builtins
 from typing import Set, Tuple
 
-from llmcompressor.utils import patch_attr
+from compressed_tensors.utils import patch_attr
 
 
 class NameAnalyzer(ast.NodeVisitor):
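The three pipeline files above (and `sequential/helpers.py` and `dev.py` below) switch their `patch_attr` import to `compressed_tensors.utils`. Here is a minimal sketch of how such a temporary-attribute patch is typically used, assuming `patch_attr(obj, name, value)` is a context manager that restores the original attribute on exit; the `config` object is a hypothetical stand-in:

```python
from types import SimpleNamespace

from compressed_tensors.utils import patch_attr

config = SimpleNamespace(use_cache=True)

# Temporarily override the attribute for the duration of the block
with patch_attr(config, "use_cache", False):
    assert config.use_cache is False  # patched inside the context

assert config.use_cache is True  # assumed to be restored on exit
```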

src/llmcompressor/pipelines/sequential/helpers.py

Lines changed: 2 additions & 1 deletion
@@ -10,6 +10,7 @@
 from compressed_tensors.utils import (
     has_offloaded_params,
     offloaded_dispatch,
+    patch_attr,
     remove_dispatch,
 )
 from compressed_tensors.utils.match import match_targets
@@ -24,7 +25,7 @@
 from llmcompressor.modifiers import Modifier
 from llmcompressor.modifiers.utils.hooks import HooksMixin
 from llmcompressor.pipelines.sequential.transformers_helpers import HFTracer
-from llmcompressor.utils.helpers import calibration_forward_context, patch_attr
+from llmcompressor.utils.helpers import calibration_forward_context
 from llmcompressor.utils.pytorch.module import get_no_split_params
 
 from .ast_helpers import append_autowrap_source_on_fail, autowrap_forwards

src/llmcompressor/utils/dev.py

Lines changed: 1 addition & 3 deletions
@@ -7,15 +7,13 @@
 import torch
 from accelerate import dispatch_model, infer_auto_device_map
 from accelerate.utils import get_balanced_memory
-from compressed_tensors.utils import remove_dispatch
+from compressed_tensors.utils import patch_attr, remove_dispatch
 from huggingface_hub import snapshot_download
 from safetensors.torch import save_file
 from transformers import AutoModelForCausalLM, PreTrainedModel
 from transformers.modeling_utils import TORCH_INIT_FUNCTIONS
 from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, WEIGHTS_INDEX_NAME
 
-from llmcompressor.utils.helpers import patch_attr
-
 __all__ = [
     "skip_weights_download",
     "patch_transformers_logger_level",

src/llmcompressor/utils/helpers.py

Lines changed: 31 additions & 0 deletions
@@ -24,6 +24,7 @@
 import numpy
 import torch
 from compressed_tensors.quantization import disable_quantization, enable_quantization
+from compressed_tensors.utils import deprecated
 from loguru import logger
 from transformers import PreTrainedModel
 

The other 30 additions each insert a `@deprecated()` decorator directly above one of the following helpers in this file:

* `flatten_iterable`, `convert_to_bool`, `validate_str_iterable`, `bucket_iterable`
* `interpolate`, `interpolate_list_linear`, `interpolated_integral` (these three are tagged `@deprecated(future_name="torch.lerp")`)
* `clean_path`, `create_dirs`, `create_parent_dirs`, `create_unique_dir`, `path_file_count`, `path_file_size`, `is_url`
* `load_numpy`, `save_numpy`, `_fix_loaded_numpy`, `load_numpy_from_tar`, `load_numpy_list`, `load_labeled_data`
* the `append` and `stack` methods of the numpy batching class
* `tensor_export`, `tensors_export`, `_tensors_export_recursive`, `_tensors_export_batch`
* `json_to_jsonl`, `deprecation_warning`, `getattr_chain`
* `disable_lm_head` (the decorator sits above its existing `@contextlib.contextmanager`)
