diff --git a/README.rst b/README.rst
index 19d9b829a2..ab238ad27c 100644
--- a/README.rst
+++ b/README.rst
@@ -60,7 +60,7 @@ This is how you use the ``pyhf`` Python API to build a statistical model and run
     ...     test_mu, data, model, test_stat="qtilde", return_expected=True
     ... )
     >>> print(f"Observed: {CLs_obs:.8f}, Expected: {CLs_exp:.8f}")
-    Observed: 0.05251497, Expected: 0.06445321
+    Observed: 0.05251497, Expected: 0.06445319

 Alternatively the statistical model and observational data can be read from its serialized JSON representation (see next section).

diff --git a/src/pyhf/infer/__init__.py b/src/pyhf/infer/__init__.py
index 058a7460d6..213a481b01 100644
--- a/src/pyhf/infer/__init__.py
+++ b/src/pyhf/infer/__init__.py
@@ -38,6 +38,7 @@ def hypotest(
     Example:

         >>> import pyhf
+        >>> import numpy as np
         >>> pyhf.set_backend("numpy")
         >>> model = pyhf.simplemodels.uncorrelated_background(
         ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -50,8 +51,8 @@ def hypotest(
         ... )
         >>> CLs_obs
         array(0.05251497)
-        >>> CLs_exp_band
-        [array(0.00260626), array(0.01382005), array(0.06445321), array(0.23525644), array(0.57303621)]
+        >>> np.isclose(CLs_exp_band, [0.00260626, 0.01382005, 0.06445321, 0.23525644, 0.57303621])
+        array([ True,  True,  True,  True,  True])

     Args:
         poi_test (Number or Tensor): The value of the parameter of interest (POI)
diff --git a/src/pyhf/infer/calculators.py b/src/pyhf/infer/calculators.py
index 12bb6d5bce..6fc83b00af 100644
--- a/src/pyhf/infer/calculators.py
+++ b/src/pyhf/infer/calculators.py
@@ -40,6 +40,7 @@ def generate_asimov_data(
     Example:

         >>> import pyhf
+        >>> import numpy as np
         >>> pyhf.set_backend("numpy")
         >>> model = pyhf.simplemodels.uncorrelated_background(
         ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -47,12 +48,14 @@ def generate_asimov_data(
         >>> observations = [51, 48]
         >>> data = observations + model.config.auxdata
         >>> mu_test = 1.0
-        >>> pyhf.infer.calculators.generate_asimov_data(mu_test, data, model, None, None, None)
-        array([ 60.61229858,  56.52802479, 270.06832542,  48.31545488])
-        >>> pyhf.infer.calculators.generate_asimov_data(
+        >>> asimov_data = pyhf.infer.calculators.generate_asimov_data(mu_test, data, model, None, None, None)
+        >>> np.isclose(asimov_data, [ 60.61229858, 56.52802479, 270.06832542, 48.31545488])
+        array([ True,  True,  True,  True])
+        >>> _, asimov_params = pyhf.infer.calculators.generate_asimov_data(
         ...     mu_test, data, model, None, None, None, return_fitted_pars=True
         ... )
-        (array([ 60.61229858,  56.52802479, 270.06832542,  48.31545488]), array([1.        , 0.97224597, 0.87553894]))
+        >>> np.isclose(asimov_params, [1. , 0.97224597, 0.87553894])
+        array([ True,  True,  True])

     Args:
         asimov_mu (:obj:`float`): The value for the parameter of interest to be used.
@@ -339,6 +342,7 @@ def teststatistic(self, poi_test):
         Example:

             >>> import pyhf
+            >>> import numpy as np
             >>> pyhf.set_backend("numpy")
             >>> model = pyhf.simplemodels.uncorrelated_background(
             ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -347,12 +351,20 @@ def teststatistic(self, poi_test):
             >>> observations = [51, 48]
             >>> data = observations + model.config.auxdata
             >>> mu_test = 1.0
             >>> asymptotic_calculator = pyhf.infer.calculators.AsymptoticCalculator(data, model, test_stat="qtilde")
-            >>> asymptotic_calculator.teststatistic(mu_test)
-            array(0.14043184)
-            >>> asymptotic_calculator.fitted_pars
-            HypoTestFitResults(asimov_pars=array([0.        , 1.0030482 , 0.96264534]), free_fit_to_data=array([0.        , 1.0030512 , 0.96266961]), free_fit_to_asimov=array([0.        , 1.00304893, 0.96263365]), fixed_poi_fit_to_data=array([1.        , 0.97224597, 0.87553894]), fixed_poi_fit_to_asimov=array([1.        , 0.97276864, 0.87142047]))
-            >>> asymptotic_calculator.fitted_pars.free_fit_to_asimov # best-fit parameters to Asimov dataset
-            array([0.        , 1.00304893, 0.96263365])
+            >>> test_stat = asymptotic_calculator.teststatistic(mu_test)
+            >>> np.isclose(test_stat, 0.14043184)
+            np.True_
+            >>> fit_results = asymptotic_calculator.fitted_pars
+            >>> np.isclose(fit_results.asimov_pars, [0. , 1.0030482 , 0.96264534])
+            array([ True,  True,  True])
+            >>> np.isclose(fit_results.free_fit_to_data, [0. , 1.0030512 , 0.96266961])
+            array([ True,  True,  True])
+            >>> np.isclose(fit_results.free_fit_to_asimov, [0. , 1.00304893, 0.96263365]) # best-fit parameters to Asimov dataset
+            array([ True,  True,  True])
+            >>> np.isclose(fit_results.fixed_poi_fit_to_data, [1. , 0.97224597, 0.87553894])
+            array([ True,  True,  True])
+            >>> np.isclose(fit_results.fixed_poi_fit_to_asimov, [1. , 0.97276864, 0.87142047])
+            array([ True,  True,  True])

         Args:
             poi_test (:obj:`float` or :obj:`tensor`): The value for the parameter of interest.
@@ -433,6 +445,7 @@ def pvalues(self, teststat, sig_plus_bkg_distribution, bkg_only_distribution):
         Example:

             >>> import pyhf
+            >>> import numpy as np
             >>> pyhf.set_backend("numpy")
             >>> model = pyhf.simplemodels.uncorrelated_background(
             ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -446,8 +459,12 @@ def pvalues(self, teststat, sig_plus_bkg_distribution, bkg_only_distribution):
             >>> q_tilde = asymptotic_calculator.teststatistic(mu_test)
             >>> sig_plus_bkg_dist, bkg_dist = asymptotic_calculator.distributions(mu_test)
             >>> CLsb, CLb, CLs = asymptotic_calculator.pvalues(q_tilde, sig_plus_bkg_dist, bkg_dist)
-            >>> CLsb, CLb, CLs
-            (array(0.02332502), array(0.4441594), array(0.05251497))
+            >>> np.isclose(CLsb, 0.02332502)
+            np.True_
+            >>> np.isclose(CLb, 0.4441594)
+            np.True_
+            >>> np.isclose(CLs, 0.05251497)
+            np.True_

         Args:
             teststat (:obj:`tensor`): The test statistic.
@@ -478,6 +495,7 @@ def expected_pvalues(self, sig_plus_bkg_distribution, bkg_only_distribution):
         Example:

             >>> import pyhf
+            >>> import numpy as np
             >>> pyhf.set_backend("numpy")
             >>> model = pyhf.simplemodels.uncorrelated_background(
             ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -491,8 +509,8 @@ def expected_pvalues(self, sig_plus_bkg_distribution, bkg_only_distribution):
             >>> _ = asymptotic_calculator.teststatistic(mu_test)
             >>> sig_plus_bkg_dist, bkg_dist = asymptotic_calculator.distributions(mu_test)
             >>> CLsb_exp_band, CLb_exp_band, CLs_exp_band = asymptotic_calculator.expected_pvalues(sig_plus_bkg_dist, bkg_dist)
-            >>> CLs_exp_band
-            [array(0.00260626), array(0.01382005), array(0.06445321), array(0.23525644), array(0.57303621)]
+            >>> np.isclose(CLs_exp_band, [0.00260626, 0.01382005, 0.06445321, 0.23525644, 0.57303621])
+            array([ True,  True,  True,  True,  True])

         Args:
             sig_plus_bkg_distribution (~pyhf.infer.calculators.AsymptoticTestStatDistribution):
@@ -611,6 +629,7 @@ def expected_value(self, nsigma):
         Examples:

             >>> import pyhf
+            >>> import numpy as np
             >>> import numpy.random as random
             >>> random.seed(0)
             >>> pyhf.set_backend("numpy")
@@ -623,6 +642,7 @@ def expected_value(self, nsigma):
             np.float64(6.15094381...)

             >>> import pyhf
+            >>> import numpy as np
             >>> import numpy.random as random
             >>> random.seed(0)
             >>> pyhf.set_backend("numpy")
@@ -646,9 +666,9 @@ def expected_value(self, nsigma):
             ...     )
             ... )
             >>> n_sigma = pyhf.tensorlib.astensor([-2, -1, 0, 1, 2])
-            >>> dist.expected_value(n_sigma)
-            array([0.00000000e+00, 0.00000000e+00, 5.53671231e-04, 8.29987137e-01,
-                   2.99592664e+00])
+            >>> exp_values = dist.expected_value(n_sigma)
+            >>> np.isclose(exp_values, [0.00000000e+00, 0.00000000e+00, 5.53671231e-04, 8.29987137e-01, 2.99592664e+00])
+            array([ True,  True,  True,  True,  True])

         Args:
             nsigma (:obj:`int` or :obj:`tensor`): The number of standard deviations.
diff --git a/src/pyhf/infer/intervals/upper_limits.py b/src/pyhf/infer/intervals/upper_limits.py
index 6b86d586fe..599a33c9de 100644
--- a/src/pyhf/infer/intervals/upper_limits.py
+++ b/src/pyhf/infer/intervals/upper_limits.py
@@ -46,10 +46,10 @@ def toms748_scan(
         >>> obs_limit, exp_limits = pyhf.infer.intervals.upper_limits.toms748_scan(
         ...     data, model, 0., 5., rtol=0.01
         ... )
-        >>> obs_limit
-        array(1.01156939)
-        >>> exp_limits
-        [array(0.5600747), array(0.75702605), array(1.06234693), array(1.50116923), array(2.05078912)]
+        >>> np.isclose(obs_limit, 1.01156939)
+        np.True_
+        >>> np.isclose(exp_limits, [0.5600747, 0.75702605, 1.06234693, 1.50116923, 2.05078912])
+        array([ True,  True,  True,  True,  True])

     Args:
         data (:obj:`tensor`): The observed data.
@@ -161,10 +161,10 @@ def linear_grid_scan(
         >>> obs_limit, exp_limits, (scan, results) = pyhf.infer.intervals.upper_limits.upper_limit(
         ...     data, model, scan, return_results=True
         ... )
-        >>> obs_limit
-        array(1.01764089)
-        >>> exp_limits
-        [array(0.59576921), array(0.76169166), array(1.08504773), array(1.50170482), array(2.06654952)]
+        >>> np.isclose(obs_limit, 1.01764089)
+        np.True_
+        >>> np.isclose(exp_limits, [0.59576921, 0.76169166, 1.08504773, 1.50170482, 2.06654952])
+        array([ True,  True,  True,  True,  True])

     Args:
         data (:obj:`tensor`): The observed data.
@@ -225,10 +225,10 @@ def upper_limit(
         >>> obs_limit, exp_limits, (scan, results) = pyhf.infer.intervals.upper_limits.upper_limit(
         ...     data, model, scan, return_results=True
         ... )
-        >>> obs_limit
-        array(1.01764089)
-        >>> exp_limits
-        [array(0.59576921), array(0.76169166), array(1.08504773), array(1.50170482), array(2.06654952)]
+        >>> np.isclose(obs_limit, 1.01764089)
+        np.True_
+        >>> np.isclose(exp_limits, [0.59576921, 0.76169166, 1.08504773, 1.50170482, 2.06654952])
+        array([ True,  True,  True,  True,  True])

     Args:
         data (:obj:`tensor`): The observed data.
diff --git a/src/pyhf/infer/mle.py b/src/pyhf/infer/mle.py
index c269eb47c8..5e6357ae2b 100644
--- a/src/pyhf/infer/mle.py
+++ b/src/pyhf/infer/mle.py
@@ -86,6 +86,7 @@ def fit(data, pdf, init_pars=None, par_bounds=None, fixed_params=None, **kwargs)
     Example:

         >>> import pyhf
+        >>> import numpy as np
         >>> pyhf.set_backend("numpy")
         >>> model = pyhf.simplemodels.uncorrelated_background(
         ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -93,8 +94,8 @@ def fit(data, pdf, init_pars=None, par_bounds=None, fixed_params=None, **kwargs)
         >>> observations = [51, 48]
         >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
         >>> bestfit_pars, twice_nll = pyhf.infer.mle.fit(data, model, return_fitted_val=True)
-        >>> bestfit_pars
-        array([0.        , 1.0030512 , 0.96266961])
+        >>> np.isclose(bestfit_pars, [0. , 1.0030512 , 0.96266961])
+        array([ True,  True,  True])
         >>> twice_nll
         array(24.98393521)
         >>> -2 * model.logpdf(bestfit_pars, data) == twice_nll
@@ -157,6 +158,7 @@ def fixed_poi_fit(
     Example:

         >>> import pyhf
+        >>> import numpy as np
         >>> pyhf.set_backend("numpy")
         >>> model = pyhf.simplemodels.uncorrelated_background(
         ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -167,8 +169,8 @@ def fixed_poi_fit(
         >>> bestfit_pars, twice_nll = pyhf.infer.mle.fixed_poi_fit(
         ...     test_poi, data, model, return_fitted_val=True
         ... )
-        >>> bestfit_pars
-        array([1.        , 0.97224597, 0.87553894])
+        >>> np.isclose(bestfit_pars, [1. , 0.97224597, 0.87553894])
+        array([ True,  True,  True])
         >>> twice_nll
         array(28.92218013)
         >>> -2 * model.logpdf(bestfit_pars, data) == twice_nll
diff --git a/src/pyhf/infer/test_statistics.py b/src/pyhf/infer/test_statistics.py
index 97b6babe79..33537f017a 100644
--- a/src/pyhf/infer/test_statistics.py
+++ b/src/pyhf/infer/test_statistics.py
@@ -84,6 +84,7 @@ def qmu(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=F
     Example:

         >>> import pyhf
+        >>> import numpy as np
         >>> pyhf.set_backend("numpy")
         >>> model = pyhf.simplemodels.uncorrelated_background(
         ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -99,10 +100,15 @@ def qmu(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=F
         ...     test_mu, data, model, init_pars, par_bounds, fixed_params
         ... )
         array(3.9549891)
-        >>> pyhf.infer.test_statistics.qmu(
+        >>> test_stat, (constrained, unconstrained) = pyhf.infer.test_statistics.qmu(
         ...     test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
         ... )
-        (array(3.9549891), (array([1.        , 0.97224597, 0.87553894]), array([-0.06679525,  1.00555369,  0.96930896])))
+        >>> np.isclose(test_stat, 3.9549891)
+        np.True_
+        >>> np.isclose(constrained, [1. , 0.97224597, 0.87553894])
+        array([ True,  True,  True])
+        >>> np.isclose(unconstrained, [-0.06679525, 1.00555369, 0.96930896])
+        array([ True,  True,  True])

     Args:
         mu (Number or Tensor): The signal strength parameter
@@ -180,6 +186,7 @@ def qmu_tilde(
     Example:

         >>> import pyhf
+        >>> import numpy as np
         >>> pyhf.set_backend("numpy")
         >>> model = pyhf.simplemodels.uncorrelated_background(
         ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -194,10 +201,15 @@ def qmu_tilde(
         ...     test_mu, data, model, init_pars, par_bounds, fixed_params
         ... )
         array(3.93824492)
-        >>> pyhf.infer.test_statistics.qmu_tilde(
+        >>> test_stat, (constrained, unconstrained) = pyhf.infer.test_statistics.qmu_tilde(
         ...     test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
         ... )
-        (array(3.93824492), (array([1.        , 0.97224597, 0.87553894]), array([0.        , 1.0030512 , 0.96266961])))
+        >>> test_stat
+        array(3.93824492)
+        >>> np.isclose(constrained, [1. , 0.97224597, 0.87553894])
+        array([ True,  True,  True])
+        >>> np.isclose(unconstrained, [0. , 1.0030512 , 0.96266961])
+        array([ True,  True,  True])

     Args:
         mu (Number or Tensor): The signal strength parameter
@@ -262,6 +274,7 @@ def tmu(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=F
     Example:

         >>> import pyhf
+        >>> import numpy as np
         >>> pyhf.set_backend("numpy")
         >>> model = pyhf.simplemodels.uncorrelated_background(
         ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -277,10 +290,15 @@ def tmu(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=F
         ...     test_mu, data, model, init_pars, par_bounds, fixed_params
         ... )
         array(3.9549891)
-        >>> pyhf.infer.test_statistics.tmu(
+        >>> test_stat, (constrained, unconstrained) = pyhf.infer.test_statistics.tmu(
         ...     test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
         ... )
-        (array(3.9549891), (array([1.        , 0.97224597, 0.87553894]), array([-0.06679525,  1.00555369,  0.96930896])))
+        >>> test_stat
+        array(3.9549891)
+        >>> np.isclose(constrained, [1. , 0.97224597, 0.87553894])
+        array([ True,  True,  True])
+        >>> np.isclose(unconstrained, [-0.06679525, 1.00555369, 0.96930896])
+        array([ True,  True,  True])

     Args:
         mu (Number or Tensor): The signal strength parameter
@@ -353,6 +371,7 @@ def tmu_tilde(
     Example:

         >>> import pyhf
+        >>> import numpy as np
         >>> pyhf.set_backend("numpy")
         >>> model = pyhf.simplemodels.uncorrelated_background(
         ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -367,10 +386,15 @@ def tmu_tilde(
         ...     test_mu, data, model, init_pars, par_bounds, fixed_params
         ... )
         array(3.93824492)
-        >>> pyhf.infer.test_statistics.tmu_tilde(
+        >>> test_stat, (constrained, unconstrained) = pyhf.infer.test_statistics.tmu_tilde(
         ...     test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
         ... )
-        (array(3.93824492), (array([1.        , 0.97224597, 0.87553894]), array([0.        , 1.0030512 , 0.96266961])))
+        >>> test_stat
+        array(3.93824492)
+        >>> np.isclose(constrained, [1. , 0.97224597, 0.87553894])
+        array([ True,  True,  True])
+        >>> np.isclose(unconstrained, [0. , 1.0030512 , 0.96266961])
+        array([ True,  True,  True])

     Args:
         mu (Number or Tensor): The signal strength parameter
@@ -433,6 +457,7 @@ def q0(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=Fa
     Example:

         >>> import pyhf
+        >>> import numpy as np
         >>> pyhf.set_backend("numpy")
         >>> model = pyhf.simplemodels.uncorrelated_background(
         ...     signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
@@ -445,10 +470,15 @@ def q0(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=Fa
         >>> fixed_params = model.config.suggested_fixed()
         >>> pyhf.infer.test_statistics.q0(test_mu, data, model, init_pars, par_bounds, fixed_params)
         array(2.98339447)
-        >>> pyhf.infer.test_statistics.q0(
+        >>> test_stat, (constrained, unconstrained) = pyhf.infer.test_statistics.q0(
         ...     test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
         ... )
-        (array(2.98339447), (array([0.        , 1.03050845, 1.12128752]), array([0.95260667, 0.99635345, 1.02140172])))
+        >>> test_stat
+        array(2.98339447)
+        >>> np.isclose(constrained, [0. , 1.03050845, 1.12128752])
+        array([ True,  True,  True])
+        >>> np.isclose(unconstrained, [0.95260667, 0.99635345, 1.02140172])
+        array([ True,  True,  True])

     Args:
         mu (Number or Tensor): The signal strength parameter (must be set to zero)
diff --git a/tests/test_calculator.py b/tests/test_calculator.py
index 2395d1ff8c..ad77572470 100644
--- a/tests/test_calculator.py
+++ b/tests/test_calculator.py
@@ -85,7 +85,10 @@ def test_asymptotic_calculator_has_fitted_pars(test_stat):
         fitted_pars.free_fit_to_data
     )
     # lower tolerance for amd64 and arm64 to agree
-    # FIXME: SciPy v1.16.0 gives a different result from SciPy v1.15.3
+    # SciPy v1.16.0 gives a different result from SciPy v1.15.3
+    # - tune the test tolerance to cover: https://github.com/scikit-hep/pyhf/issues/2593#issuecomment-3411614219
+    # darwin: [7.6470499e-05, 1.4997178]
+    # linux: [7.6321608e-05, 1.4997178]
     assert pytest.approx(
-        [7.6470499e-05, 1.4997178], rel=1e-3
+        [7.63960535e-05, 1.4997178], rel=1e-3
     ) == pyhf.tensorlib.tolist(fitted_pars.free_fit_to_asimov)