Merged

26 commits
d1ac1a3
fix: Ignore deprecation warning
matthewfeickert Jul 17, 2025
ec7df68
ignore again
matthewfeickert Jul 17, 2025
fdc53bd
fixup ignore
matthewfeickert Jul 17, 2025
9db68d1
Ensure pytest tests run on CPU only
matthewfeickert Jul 17, 2025
4e8a6da
Ignore PyTorch deprecation on np.bool for now
matthewfeickert Jul 17, 2025
1489b8f
ignore PyTorch interacting with NumPy
matthewfeickert Jul 17, 2025
4373fe0
FIXME: SciPy versions differ in Asimov fit
matthewfeickert Jul 17, 2025
e0a32ba
Remove click CliRunner mix_stderr as removed in click v8.2
matthewfeickert Jul 17, 2025
1491e4f
Add guard for tests
matthewfeickert Jul 17, 2025
23b2727
Remove click bound
matthewfeickert Jul 17, 2025
6a9e014
Add upper bound on numpy if pytorch used
matthewfeickert Jul 17, 2025
2fd4f5e
Temporarily bound given v1.16.0 failing in tests
matthewfeickert Jul 17, 2025
865eff1
Add max compatible tensorflow-probability for osx-64 wheels
matthewfeickert Jul 17, 2025
0b352ca
try to simplify logic
matthewfeickert Jul 17, 2025
786a4cc
Revert "try to simplify logic"
matthewfeickert Jul 17, 2025
74ac780
fix mypy errors
kratsg Jul 17, 2025
a293a33
jsonschema finally broke us
matthewfeickert Jul 17, 2025
c3f1c6b
* Move mypy fixes out into a separate PR
matthewfeickert Jul 17, 2025
47ccc20
Revert "jsonschema finally broke us"
matthewfeickert Jul 17, 2025
91e765f
add arraylike
kratsg Jul 17, 2025
0ad39ab
Revert "* Move mypy fixes out into a separate PR"
matthewfeickert Jul 17, 2025
6b6f9bb
Running pyhf without args is now exit code 2
matthewfeickert Jul 17, 2025
8e3b1a4
fixup typo
matthewfeickert Jul 17, 2025
99f5662
Add link to GitHub Issue
matthewfeickert Jul 18, 2025
b9930d7
Remove list of problems
matthewfeickert Jul 18, 2025
ebfd2b3
Add parser option that automatically disables CUDA
matthewfeickert Jul 18, 2025
2 changes: 0 additions & 2 deletions .github/workflows/docker.yml
@@ -101,8 +101,6 @@ jobs:

- name: Run CLI API check
run: |
printf "\npyhf\n"
docker run --rm pyhf/pyhf:sha-${GITHUB_SHA::8}
printf "\npyhf --version\n"
docker run --rm pyhf/pyhf:sha-${GITHUB_SHA::8} --version
printf "\npyhf --help\n"
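Context for the deletion above: per the "Running pyhf without args is now exit code 2" commit, invoking pyhf with no subcommand now exits with code 2 rather than 0, so the bare docker run smoke test would fail CI even on a healthy image; only the invocations with explicit arguments (--version, --help) are kept.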
19 changes: 15 additions & 4 deletions pyproject.toml
@@ -50,7 +50,8 @@ dependencies = [
"jsonpatch>=1.15",
"jsonschema>=4.15.0", # for utils
"pyyaml>=5.1", # for parsing CLI equal-delimited options
"scipy>=1.5.2", # requires numpy, which is required by pyhf and tensorflow
# c.f. https://github.com/scikit-hep/pyhf/issues/2593 for scipy v1.16.0 upper bound
"scipy>=1.5.2,<1.16.0", # requires numpy, which is required by pyhf and tensorflow
"tqdm>=4.56.0", # for readxml
"numpy", # compatible versions controlled through scipy
]
@@ -70,7 +71,7 @@ Homepage = "https://github.com/scikit-hep/pyhf"
shellcomplete = ["click_completion"]
# TODO: 'tensorflow' supports all platform_machine for tensorflow v2.16.1+
# but TensorFlow only supports python_version 3.8 up through tensorflow v2.13.1.
# So until Python 3.8 support is dropped, split requirments on python_version
# So until Python 3.8 support is dropped, split requirements on python_version
# before and after 3.9.
# NOTE: macos x86 support is deprecated from tensorflow v2.17.0 onwards.
tensorflow = [
@@ -79,9 +80,14 @@ tensorflow = [
"tensorflow-macos>=2.7.0; python_version < '3.9' and platform_machine == 'arm64' and platform_system == 'Darwin'", # c.f. PR #2119, #2452
"tensorflow-probability>=0.11.0; python_version < '3.9'", # c.f. PR #1657, #2452
# python >= 3.9
"tensorflow-probability[tf]>=0.24.0; python_version >= '3.9'" # c.f. PR #2452
"tensorflow-probability[tf]>=0.24.0,<0.25.0; python_version >= '3.9' and platform_machine != 'arm64' and platform_system == 'Darwin'", # c.f. TensorFlow v2.17.0
"tensorflow-probability[tf]>=0.24.0; python_version >= '3.9' and platform_machine == 'arm64' and platform_system == 'Darwin'", # c.f. TensorFlow v2.17.0
"tensorflow-probability[tf]>=0.24.0; python_version >= '3.9' and platform_system != 'Darwin'" # c.f. TensorFlow v2.17.0
]
torch = [
"torch>=1.10.0", # c.f. PR #1657
"numpy<2.0" # c.f. https://github.com/pytorch/pytorch/issues/157973
]
torch = ["torch>=1.10.0"] # c.f. PR #1657
jax = [
"jax>=0.4.1", # c.f. PR #2079
"jaxlib>=0.4.1", # c.f. PR #2079
@@ -229,6 +235,11 @@ filterwarnings = [
"ignore:Skipping device Apple Paravirtual device that does not support Metal 2.0:UserWarning", # Can't fix given hardware/virtualized device
'ignore:Type google._upb._message.[A-Z]+ uses PyType_Spec with a metaclass that has custom:DeprecationWarning', # protobuf via tensorflow
"ignore:jax.xla_computation is deprecated. Please use the AOT APIs:DeprecationWarning", # jax v0.4.30
"ignore:'MultiCommand' is deprecated and will be removed in Click 9.0. Use 'Group' instead.:DeprecationWarning", # Click
"ignore:Jupyter is migrating its paths to use standard platformdirs:DeprecationWarning", # papermill
"ignore:datetime.datetime.utcnow\\(\\) is deprecated:DeprecationWarning", # papermill
"ignore:In future, it will be an error for 'np.bool' scalars to be interpreted as an index:DeprecationWarning", # PyTorch
"ignore:__array__ implementation doesn't accept a copy keyword, so passing copy=False failed. __array__ must implement 'dtype' and 'copy' keyword arguments.:DeprecationWarning", # PyTorch interacting with NumPy
]

[tool.coverage.run]
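Context for the new filterwarnings entries: pytest filter strings follow the same action:message:category form as Python's -W option, with the message part treated as a regular expression matched against the start of the warning text. A minimal sketch of how the Click entry behaves, using the plain warnings module (the filter string comes from the diff above; the rest is illustrative):

import warnings

# Equivalent of the pytest filter
# "ignore:'MultiCommand' is deprecated...:DeprecationWarning"
warnings.filterwarnings(
    "ignore",
    message="'MultiCommand' is deprecated",
    category=DeprecationWarning,
)

# With the filter installed, this warning is silently dropped:
warnings.warn(
    "'MultiCommand' is deprecated and will be removed in Click 9.0. Use 'Group' instead.",
    DeprecationWarning,
)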
47 changes: 27 additions & 20 deletions src/pyhf/tensor/numpy_backend.py
@@ -11,13 +11,15 @@
if TYPE_CHECKING:
from numpy.typing import ArrayLike, DTypeLike, NBitBase, NDArray
else:
ArrayLike = "ArrayLike"
NBitBase = "NBitBase"

from scipy import special
from scipy.special import gammaln, xlogy
from scipy.stats import norm, poisson

from pyhf.typing import Literal, Shape
from typing import cast

T = TypeVar("T", bound=NBitBase)

@@ -26,27 +28,32 @@
log = logging.getLogger(__name__)


class _BasicPoisson:
class _BasicPoisson(Generic[T]):
def __init__(self, rate: Tensor[T]):
self.rate = rate

def sample(self, sample_shape: Shape) -> ArrayLike:
return poisson(self.rate).rvs(size=sample_shape + self.rate.shape) # type: ignore[no-any-return]
return cast(
ArrayLike, poisson(self.rate).rvs(size=sample_shape + self.rate.shape)
)

def log_prob(self, value: NDArray[np.number[T]]) -> ArrayLike:
def log_prob(self, value: Tensor[T]) -> ArrayLike:
tensorlib: numpy_backend[T] = numpy_backend()
return tensorlib.poisson_logpdf(value, self.rate)


class _BasicNormal:
class _BasicNormal(Generic[T]):
def __init__(self, loc: Tensor[T], scale: Tensor[T]):
self.loc = loc
self.scale = scale

def sample(self, sample_shape: Shape) -> ArrayLike:
return norm(self.loc, self.scale).rvs(size=sample_shape + self.loc.shape) # type: ignore[no-any-return]
return cast(
ArrayLike,
norm(self.loc, self.scale).rvs(size=sample_shape + self.loc.shape),
)

def log_prob(self, value: NDArray[np.number[T]]) -> ArrayLike:
def log_prob(self, value: Tensor[T]) -> ArrayLike:
tensorlib: numpy_backend[T] = numpy_backend()
return tensorlib.normal_logpdf(value, self.loc, self.scale)

@@ -125,7 +132,7 @@ def erf(self, tensor_in: Tensor[T]) -> ArrayLike:
Returns:
NumPy ndarray: The values of the error function at the given points.
"""
return special.erf(tensor_in) # type: ignore[no-any-return]
return cast(ArrayLike, special.erf(tensor_in))

def erfinv(self, tensor_in: Tensor[T]) -> ArrayLike:
"""
@@ -145,7 +152,7 @@ def erfinv(self, tensor_in: Tensor[T]) -> ArrayLike:
Returns:
NumPy ndarray: The values of the inverse of the error function at the given points.
"""
return special.erfinv(tensor_in) # type: ignore[no-any-return]
return cast(ArrayLike, special.erfinv(tensor_in))

def tile(self, tensor_in: Tensor[T], repeats: int | Sequence[int]) -> ArrayLike:
"""
@@ -207,7 +214,7 @@ def tolist(self, tensor_in: Tensor[T] | list[T]) -> list[T]:
raise

def outer(self, tensor_in_1: Tensor[T], tensor_in_2: Tensor[T]) -> ArrayLike:
return np.outer(tensor_in_1, tensor_in_2) # type: ignore[arg-type]
return cast(ArrayLike, np.outer(tensor_in_1, tensor_in_2))

def gather(self, tensor: Tensor[T], indices: NDArray[np.integer[T]]) -> ArrayLike:
return tensor[indices]
@@ -255,7 +262,7 @@ def sum(self, tensor_in: Tensor[T], axis: int | None = None) -> ArrayLike:
return np.sum(tensor_in, axis=axis)

def product(self, tensor_in: Tensor[T], axis: Shape | None = None) -> ArrayLike:
return np.prod(tensor_in, axis=axis) # type: ignore[arg-type]
return cast(ArrayLike, np.prod(tensor_in, axis=axis))

def abs(self, tensor: Tensor[T]) -> ArrayLike:
return np.abs(tensor)
@@ -345,7 +352,7 @@ def percentile(
.. versionadded:: 0.7.0
"""
# see https://github.com/numpy/numpy/issues/22125
return np.percentile(tensor_in, q, axis=axis, interpolation=interpolation) # type: ignore[call-overload,no-any-return]
return cast(ArrayLike, np.percentile(tensor_in, q, axis=axis, interpolation=interpolation)) # type: ignore[call-overload]

def stack(self, sequence: Sequence[Tensor[T]], axis: int = 0) -> ArrayLike:
return np.stack(sequence, axis=axis)
@@ -392,7 +399,7 @@ def simple_broadcast(self, *args: Sequence[Tensor[T]]) -> Sequence[Tensor[T]]:
return np.broadcast_arrays(*args)

def shape(self, tensor: Tensor[T]) -> Shape:
return tensor.shape
return cast(Shape, tensor.shape)

def reshape(self, tensor: Tensor[T], newshape: Shape) -> ArrayLike:
return np.reshape(tensor, newshape)
@@ -434,10 +441,10 @@ def einsum(self, subscripts: str, *operands: Sequence[Tensor[T]]) -> ArrayLike:
Returns:
tensor: the calculation based on the Einstein summation convention
"""
return np.einsum(subscripts, *operands) # type: ignore[arg-type,no-any-return]
return cast(ArrayLike, np.einsum(subscripts, *operands))

def poisson_logpdf(self, n: Tensor[T], lam: Tensor[T]) -> ArrayLike:
return xlogy(n, lam) - lam - gammaln(n + 1.0) # type: ignore[no-any-return]
return cast(ArrayLike, xlogy(n, lam) - lam - gammaln(n + 1.0))

def poisson(self, n: Tensor[T], lam: Tensor[T]) -> ArrayLike:
r"""
@@ -481,7 +488,7 @@ def poisson(self, n: Tensor[T], lam: Tensor[T]) -> ArrayLike:
"""
_n = np.asarray(n)
_lam = np.asarray(lam)
return np.exp(xlogy(_n, _lam) - _lam - gammaln(_n + 1.0)) # type: ignore[no-any-return,operator]
return cast(ArrayLike, np.exp(xlogy(_n, _lam) - _lam - gammaln(_n + 1)))

def normal_logpdf(self, x: Tensor[T], mu: Tensor[T], sigma: Tensor[T]) -> ArrayLike:
# this is much faster than
@@ -491,7 +498,7 @@ def normal_logpdf(self, x: Tensor[T], mu: Tensor[T], sigma: Tensor[T]) -> ArrayLike:
root2pi = np.sqrt(2 * np.pi)
prefactor = -np.log(sigma * root2pi)
summand = -np.square(np.divide((x - mu), (root2 * sigma)))
return prefactor + summand # type: ignore[no-any-return]
return cast(ArrayLike, prefactor + summand)

# def normal_logpdf(self, x, mu, sigma):
# return norm.logpdf(x, loc=mu, scale=sigma)
@@ -522,7 +529,7 @@ def normal(self, x: Tensor[T], mu: Tensor[T], sigma: Tensor[T]) -> ArrayLike:
Returns:
NumPy float: Value of Normal(x|mu, sigma)
"""
return norm.pdf(x, loc=mu, scale=sigma) # type: ignore[no-any-return]
return cast(ArrayLike, norm.pdf(x, loc=mu, scale=sigma))

def normal_cdf(
self, x: Tensor[T], mu: float | Tensor[T] = 0, sigma: float | Tensor[T] = 1
@@ -548,9 +555,9 @@ def normal_cdf(
Returns:
NumPy float: The CDF
"""
return norm.cdf(x, loc=mu, scale=sigma) # type: ignore[no-any-return]
return cast(ArrayLike, norm.cdf(x, loc=mu, scale=sigma))

def poisson_dist(self, rate: Tensor[T]) -> _BasicPoisson:
def poisson_dist(self, rate: Tensor[T]) -> _BasicPoisson[T]:
r"""
The Poisson distribution with rate parameter :code:`rate`.

@@ -571,7 +578,7 @@ """
"""
return _BasicPoisson(rate)

def normal_dist(self, mu: Tensor[T], sigma: Tensor[T]) -> _BasicNormal:
def normal_dist(self, mu: Tensor[T], sigma: Tensor[T]) -> _BasicNormal[T]:
r"""
The Normal distribution with mean :code:`mu` and standard deviation :code:`sigma`.

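The recurring change in this file swaps # type: ignore[...] comments for explicit typing.cast calls, and makes _BasicPoisson and _BasicNormal generic over T so that poisson_dist and normal_dist can return the parameterized types _BasicPoisson[T] and _BasicNormal[T]. A standalone sketch of the cast pattern, assuming only that SciPy's special functions are untyped as far as mypy is concerned (the free function below is illustrative, not pyhf API):

from typing import cast

import numpy as np
from numpy.typing import ArrayLike
from scipy import special


def erf(tensor_in: ArrayLike) -> ArrayLike:
    # cast() is a no-op at runtime; it only records the intended
    # return type for mypy instead of suppressing the check entirely
    return cast(ArrayLike, special.erf(np.asarray(tensor_in)))


print(erf(np.array([0.0, 1.0])))  # [0.         0.84270079]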
18 changes: 18 additions & 0 deletions tests/conftest.py
@@ -1,4 +1,5 @@
import json
import os
import pathlib
import shutil
import sys
@@ -18,6 +19,12 @@ def pytest_addoption(parser):
choices=["tensorflow", "pytorch", "jax", "minuit"],
help="list of backends to disable in tests",
)
parser.addoption(
"--enable-cuda",
action="store_true",
default=False,
help="Allow CUDA enabled backends to run CUDA accelerated code on GPUs",
)


# Factory as fixture pattern
@@ -167,3 +174,14 @@ def datadir(tmp_path, request):
shutil.copytree(test_dir, tmp_path, dirs_exist_ok=True)

return tmp_path


@pytest.fixture(scope="session", autouse=True)
def setup_cuda_environment(request):
"""
Automatically force CUDA enabled backends to run in CPU mode unless
--enable-cuda is passed.
"""
if not request.config.getoption("--enable-cuda"):
# Ensure testing on CPU and not GPU
os.environ["CUDA_VISIBLE_DEVICES"] = ""
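
How the fixture takes effect: it is session-scoped and autouse, so it runs before any test imports a CUDA-capable backend, and an empty CUDA_VISIBLE_DEVICES makes the CUDA runtime enumerate zero devices, forcing the JAX, PyTorch, and TensorFlow backends onto CPU. A hypothetical test illustrating the behavior (not part of this PR; assumes the suite was started without --enable-cuda):

import os


def test_cuda_hidden_by_default():
    # "" means CUDA sees zero devices; an unset variable would
    # instead expose every GPU on the machine
    assert os.environ.get("CUDA_VISIBLE_DEVICES") == ""

Passing --enable-cuda simply skips the assignment, leaving whatever CUDA_VISIBLE_DEVICES the user exported untouched.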
1 change: 1 addition & 0 deletions tests/test_calculator.py
@@ -85,6 +85,7 @@
fitted_pars.free_fit_to_data
)
# lower tolerance for amd64 and arm64 to agree
# FIXME: SciPy v1.16.0 gives a different result from SciPy v1.15.3

assert pytest.approx(
[7.6470499e-05, 1.4997178], rel=1e-3
) == pyhf.tensorlib.tolist(fitted_pars.free_fit_to_asimov)
7 changes: 6 additions & 1 deletion tests/test_scripts.py
@@ -666,7 +666,12 @@ def test_missing_contrib_download(caplog):

from pyhf.contrib.cli import download

runner = CliRunner(mix_stderr=False)
# mix_stderr removed in Click v8.2.0.
# Can simplify once pyhf is Python 3.10+.
try:
runner = CliRunner(mix_stderr=False)
except TypeError:
runner = CliRunner()
result = runner.invoke(
download,
[
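The try/except TypeError above feature-detects the Click API rather than pinning an upper bound (cf. the "Remove click bound" commit): Click v8.2 removed the mix_stderr parameter and always captures stderr separately, while older Click still accepts it. A self-contained sketch of the same probe (the dummy command is illustrative):

import click
from click.testing import CliRunner


@click.command()
def boom():
    raise SystemExit(2)


# Feature-detect instead of parsing click.__version__
try:
    runner = CliRunner(mix_stderr=False)  # accepted by Click < 8.2
except TypeError:
    runner = CliRunner()  # Click >= 8.2: stderr is always separate

result = runner.invoke(boom)
print(result.exit_code)  # 2 under either Click generation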