@@ -18,7 +18,6 @@
 from scipy.stats import norm, poisson

 from pyhf.typing import Literal, Shape
-from typing import cast

 T = TypeVar("T", bound=NBitBase)

@@ -27,32 +26,27 @@
 log = logging.getLogger(__name__)


-class _BasicPoisson(Generic[T]):
+class _BasicPoisson:
     def __init__(self, rate: Tensor[T]):
         self.rate = rate

     def sample(self, sample_shape: Shape) -> ArrayLike:
-        return cast(
-            ArrayLike, poisson(self.rate).rvs(size=sample_shape + self.rate.shape)
-        )
+        return poisson(self.rate).rvs(size=sample_shape + self.rate.shape)  # type: ignore[no-any-return]

-    def log_prob(self, value: Tensor[T]) -> ArrayLike:
+    def log_prob(self, value: NDArray[np.number[T]]) -> ArrayLike:
         tensorlib: numpy_backend[T] = numpy_backend()
         return tensorlib.poisson_logpdf(value, self.rate)


-class _BasicNormal(Generic[T]):
+class _BasicNormal:
     def __init__(self, loc: Tensor[T], scale: Tensor[T]):
         self.loc = loc
         self.scale = scale

     def sample(self, sample_shape: Shape) -> ArrayLike:
-        return cast(
-            ArrayLike,
-            norm(self.loc, self.scale).rvs(size=sample_shape + self.loc.shape),
-        )
+        return norm(self.loc, self.scale).rvs(size=sample_shape + self.loc.shape)  # type: ignore[no-any-return]

-    def log_prob(self, value: Tensor[T]) -> ArrayLike:
+    def log_prob(self, value: NDArray[np.number[T]]) -> ArrayLike:
         tensorlib: numpy_backend[T] = numpy_backend()
         return tensorlib.normal_logpdf(value, self.loc, self.scale)

@@ -131,7 +125,7 @@ def erf(self, tensor_in: Tensor[T]) -> ArrayLike:
         Returns:
             NumPy ndarray: The values of the error function at the given points.
         """
-        return cast(ArrayLike, special.erf(tensor_in))
+        return special.erf(tensor_in)  # type: ignore[no-any-return]

     def erfinv(self, tensor_in: Tensor[T]) -> ArrayLike:
         """
@@ -151,7 +145,7 @@ def erfinv(self, tensor_in: Tensor[T]) -> ArrayLike:
         Returns:
             NumPy ndarray: The values of the inverse of the error function at the given points.
         """
-        return cast(ArrayLike, special.erfinv(tensor_in))
+        return special.erfinv(tensor_in)  # type: ignore[no-any-return]

     def tile(self, tensor_in: Tensor[T], repeats: int | Sequence[int]) -> ArrayLike:
         """
@@ -213,7 +207,7 @@ def tolist(self, tensor_in: Tensor[T] | list[T]) -> list[T]:
             raise

     def outer(self, tensor_in_1: Tensor[T], tensor_in_2: Tensor[T]) -> ArrayLike:
-        return cast(ArrayLike, np.outer(tensor_in_1, tensor_in_2))
+        return np.outer(tensor_in_1, tensor_in_2)  # type: ignore[arg-type]

     def gather(self, tensor: Tensor[T], indices: NDArray[np.integer[T]]) -> ArrayLike:
         return tensor[indices]
@@ -261,7 +255,7 @@ def sum(self, tensor_in: Tensor[T], axis: int | None = None) -> ArrayLike:
         return np.sum(tensor_in, axis=axis)

     def product(self, tensor_in: Tensor[T], axis: Shape | None = None) -> ArrayLike:
-        return cast(ArrayLike, np.prod(tensor_in, axis=axis))
+        return np.prod(tensor_in, axis=axis)  # type: ignore[arg-type]

     def abs(self, tensor: Tensor[T]) -> ArrayLike:
         return np.abs(tensor)
@@ -351,7 +345,7 @@ def percentile(
         .. versionadded:: 0.7.0
         """
         # see https://github.com/numpy/numpy/issues/22125
-        return cast(ArrayLike, np.percentile(tensor_in, q, axis=axis, interpolation=interpolation))  # type: ignore[call-overload]
+        return np.percentile(tensor_in, q, axis=axis, interpolation=interpolation)  # type: ignore[call-overload,no-any-return]

     def stack(self, sequence: Sequence[Tensor[T]], axis: int = 0) -> ArrayLike:
         return np.stack(sequence, axis=axis)
@@ -398,7 +392,7 @@ def simple_broadcast(self, *args: Sequence[Tensor[T]]) -> Sequence[Tensor[T]]:
         return np.broadcast_arrays(*args)

     def shape(self, tensor: Tensor[T]) -> Shape:
-        return cast(Shape, tensor.shape)
+        return tensor.shape

     def reshape(self, tensor: Tensor[T], newshape: Shape) -> ArrayLike:
         return np.reshape(tensor, newshape)
@@ -440,10 +434,10 @@ def einsum(self, subscripts: str, *operands: Sequence[Tensor[T]]) -> ArrayLike:
         Returns:
             tensor: the calculation based on the Einstein summation convention
         """
-        return cast(ArrayLike, np.einsum(subscripts, *operands))
+        return np.einsum(subscripts, *operands)  # type: ignore[arg-type,no-any-return]

     def poisson_logpdf(self, n: Tensor[T], lam: Tensor[T]) -> ArrayLike:
-        return cast(ArrayLike, xlogy(n, lam) - lam - gammaln(n + 1.0))
+        return xlogy(n, lam) - lam - gammaln(n + 1.0)  # type: ignore[no-any-return]

     def poisson(self, n: Tensor[T], lam: Tensor[T]) -> ArrayLike:
         r"""
@@ -487,7 +481,7 @@ def poisson(self, n: Tensor[T], lam: Tensor[T]) -> ArrayLike:
         """
         _n = np.asarray(n)
         _lam = np.asarray(lam)
-        return cast(ArrayLike, np.exp(xlogy(_n, _lam) - _lam - gammaln(_n + 1)))
+        return np.exp(xlogy(_n, _lam) - _lam - gammaln(_n + 1.0))  # type: ignore[no-any-return,operator]

     def normal_logpdf(self, x: Tensor[T], mu: Tensor[T], sigma: Tensor[T]) -> ArrayLike:
         # this is much faster than
@@ -497,7 +491,7 @@ def normal_logpdf(self, x: Tensor[T], mu: Tensor[T], sigma: Tensor[T]) -> ArrayLike:
         root2pi = np.sqrt(2 * np.pi)
         prefactor = -np.log(sigma * root2pi)
         summand = -np.square(np.divide((x - mu), (root2 * sigma)))
-        return cast(ArrayLike, prefactor + summand)
+        return prefactor + summand  # type: ignore[no-any-return]

     # def normal_logpdf(self, x, mu, sigma):
     #     return norm.logpdf(x, loc=mu, scale=sigma)
@@ -528,7 +522,7 @@ def normal(self, x: Tensor[T], mu: Tensor[T], sigma: Tensor[T]) -> ArrayLike:
         Returns:
             NumPy float: Value of Normal(x|mu, sigma)
         """
-        return cast(ArrayLike, norm.pdf(x, loc=mu, scale=sigma))
+        return norm.pdf(x, loc=mu, scale=sigma)  # type: ignore[no-any-return]

     def normal_cdf(
         self, x: Tensor[T], mu: float | Tensor[T] = 0, sigma: float | Tensor[T] = 1
@@ -554,9 +548,9 @@ def normal_cdf(
         Returns:
             NumPy float: The CDF
         """
-        return cast(ArrayLike, norm.cdf(x, loc=mu, scale=sigma))
+        return norm.cdf(x, loc=mu, scale=sigma)  # type: ignore[no-any-return]

-    def poisson_dist(self, rate: Tensor[T]) -> _BasicPoisson[T]:
+    def poisson_dist(self, rate: Tensor[T]) -> _BasicPoisson:
         r"""
         The Poisson distribution with rate parameter :code:`rate`.

@@ -577,7 +571,7 @@ def poisson_dist(self, rate: Tensor[T]) -> _BasicPoisson[T]:
         """
         return _BasicPoisson(rate)

-    def normal_dist(self, mu: Tensor[T], sigma: Tensor[T]) -> _BasicNormal[T]:
+    def normal_dist(self, mu: Tensor[T], sigma: Tensor[T]) -> _BasicNormal:
         r"""
         The Normal distribution with mean :code:`mu` and standard deviation :code:`sigma`.

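Note: every hunk in this diff addresses the same mypy complaint. SciPy calls (and a few NumPy overloads) are typed as returning Any, which a return-annotated function cannot pass through when warn_return_any is enabled; the commit swaps typing.cast wrappers for inline "# type: ignore[...]" comments. A minimal sketch of the two styles, assuming mypy with warn_return_any; untyped_op below is a hypothetical stand-in for an Any-returning SciPy call, not part of pyhf:

from typing import Any, cast

from numpy.typing import ArrayLike


def untyped_op(x: float) -> Any:
    # Hypothetical stand-in for e.g. scipy.special.erf, which mypy sees as returning Any
    return x


def with_cast(x: float) -> ArrayLike:
    # Old style in this diff: wrap the Any-returning call in typing.cast
    return cast(ArrayLike, untyped_op(x))


def with_ignore(x: float) -> ArrayLike:
    # New style: return directly and silence mypy's no-any-return warning inline
    return untyped_op(x)  # type: ignore[no-any-return]

Both functions behave identically at runtime; the difference is only in how the type checker is told to accept the Any-typed value, with the ignore comment avoiding a runtime no-op call to cast.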