48 changes: 29 additions & 19 deletions pyerrors/fits.py
@@ -7,7 +7,7 @@
import scipy.stats
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.odr import ODR, Model, RealData
from odrpack import odr_fit
import iminuit
from autograd import jacobian as auto_jacobian
from autograd import hessian as auto_hessian
@@ -634,32 +634,42 @@ def func(a, x):
raise Exception('No y errors available, run the gamma method first.')

if 'initial_guess' in kwargs:
x0 = kwargs.get('initial_guess')
x0 = np.asarray(kwargs.get('initial_guess'), dtype=np.float64)
if len(x0) != n_parms:
raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
else:
x0 = [1] * n_parms

data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
model = Model(func)
odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
odr.set_job(fit_type=0, deriv=1)
out = odr.run()
x0 = np.ones(n_parms, dtype=np.float64)
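For reference, the starting point can be supplied through the initial_guess keyword; otherwise every parameter starts at 1.0. A minimal usage sketch (hypothetical model and data, using the public pyerrors entry point exercised in the tests below):

import pyerrors as pe
import autograd.numpy as anp

def func(a, x):
    return a[0] * anp.exp(-a[1] * x)

# ox, oy: lists of Obs with errors from the gamma method (hypothetical data)
out = pe.total_least_squares(ox, oy, func, initial_guess=[1.5, 0.3])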

# odrpack expects f(x, beta), but pyerrors convention is f(beta, x)
Copilot AI commented on Feb 3, 2026

The implementation now uses odrpack.odr_fit, but total_least_squares still documents itself as being based on SciPy ODR (see the “Notes” section of this function’s docstring), and the package-level docs in pyerrors/__init__.py still reference SciPy’s ODR (and even point readers to fits.least_squares). Please update the documentation to reflect the new odrpack backend and reference fits.total_least_squares where appropriate, so users aren’t directed to deprecated/incorrect APIs.

Suggested change:
# odrpack expects f(x, beta), but pyerrors convention is f(beta, x)
# The total_least_squares backend uses odrpack.odr_fit, which expects
# a model signature f(x, beta), while pyerrors conventions use f(beta, x).
def wrapped_func(x, beta):
return func(beta, x)

out = odr_fit(
wrapped_func,
np.asarray(x_f, dtype=np.float64),
np.asarray(y_f, dtype=np.float64),
beta0=x0,
weight_x=1.0 / np.asarray(dx_f, dtype=np.float64) ** 2,
weight_y=1.0 / np.asarray(dy_f, dtype=np.float64) ** 2,
partol=np.finfo(np.float64).eps,
task='explicit-ODR',
diff_scheme='central'
)
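For context on the new backend: odrpack takes weights rather than standard deviations, and the conversion used here is weight = 1/σ², mirroring the sx/sy arguments of the removed scipy.odr.RealData call. A minimal self-contained sketch with toy data, using only odr_fit arguments and result fields that already appear in this diff:

import numpy as np
from odrpack import odr_fit

def f(x, beta):  # odrpack convention: f(x, beta)
    return beta[0] * np.exp(-beta[1] * x)

x = np.linspace(0.1, 2.0, 10)
y = 2.0 * np.exp(-0.7 * x)
sx = np.full_like(x, 0.05)  # x uncertainties
sy = np.full_like(y, 0.02)  # y uncertainties

res = odr_fit(f, x, y, beta0=np.array([1.0, 1.0]),
              weight_x=1.0 / sx**2,  # inverse variances, like RealData(sx=sx)
              weight_y=1.0 / sy**2,  # inverse variances, like RealData(sy=sy)
              task='explicit-ODR')
print(res.beta, res.res_var, res.success)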

output.residual_variance = out.res_var

output.method = 'ODR'

output.message = out.stopreason

output.xplus = out.xplus
output.xplus = out.xplusd

if not silent:
print('Method: ODR')
print(*out.stopreason)
print(out.stopreason)
print('Residual variance:', output.residual_variance)

if out.info > 3:
if not out.success:
raise Exception('The minimization procedure did not converge.')

m = x_f.size
@@ -679,9 +689,9 @@ def odr_chisquare(p):

number_of_x_parameters = int(m / x_f.shape[-1])

old_jac = jacobian(func)(out.beta, out.xplus)
old_jac = jacobian(func)(out.beta, out.xplusd)
fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplusd, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
new_jac = np.concatenate((fused_row1, fused_row2), axis=1)

A = W @ new_jac
@@ -690,14 +700,14 @@
if expected_chisquare <= 0.0:
warnings.warn("Negative expected_chisquare.", RuntimeWarning)
expected_chisquare = np.abs(expected_chisquare)
output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplusd.ravel()))) / expected_chisquare
if not silent:
print('chisquare/expected_chisquare:',
output.chisquare_by_expected_chisquare)

fitp = out.beta
try:
hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplusd.ravel())))
except TypeError:
raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None

@@ -706,7 +716,7 @@ def odr_chisquare_compact_x(d):
chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
return chisq

jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplusd.ravel(), x_f.ravel())))

# Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
try:
@@ -719,7 +729,7 @@ def odr_chisquare_compact_y(d):
chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
return chisq

jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplusd.ravel(), y_f)))

# Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
try:
@@ -733,7 +743,7 @@ def odr_chisquare_compact_y(d):

output.fit_parameters = result

output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplusd.ravel())))
output.dof = x.shape[-1] - n_parms
output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
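The goodness-of-fit conversion above is the standard one: with dof = n_points − n_parms, the p-value is the upper tail of the chi-square distribution. Illustrative numbers:

import scipy.stats
# e.g. odr_chisquare = 4.2 at dof = 3 (made-up values)
p_value = 1 - scipy.stats.chi2.cdf(4.2, 3)  # ~0.24, i.e. no tension with the model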

2 changes: 1 addition & 1 deletion setup.py
@@ -25,7 +25,7 @@
license="MIT",
packages=find_packages(),
python_requires='>=3.10.0',
install_requires=['numpy>=2.0', 'autograd>=1.7.0', 'numdifftools>=0.9.41', 'matplotlib>=3.9', 'scipy>=1.13', 'iminuit>=2.28', 'h5py>=3.11', 'lxml>=5.0', 'python-rapidjson>=1.20', 'pandas>=2.2'],
install_requires=['numpy>=2.0', 'autograd>=1.7.0', 'numdifftools>=0.9.41', 'matplotlib>=3.9', 'scipy>=1.13', 'iminuit>=2.28', 'h5py>=3.11', 'lxml>=5.0', 'python-rapidjson>=1.20', 'pandas>=2.2', 'odrpack>=0.4'],
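A quick way to confirm the new dependency resolves in an environment is the same import the code now relies on:

from odrpack import odr_fit  # works once odrpack>=0.4 is installed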
extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark', 'hypothesis', 'nbmake', 'flake8']},
classifiers=[
'Development Status :: 5 - Production/Stable',
68 changes: 50 additions & 18 deletions tests/fits_test.py
@@ -3,7 +3,7 @@
import matplotlib.pyplot as plt
import math
import scipy.optimize
from scipy.odr import ODR, Model, RealData
from odrpack import odr_fit
from scipy.linalg import cholesky
from scipy.stats import norm
import iminuit
@@ -397,11 +397,21 @@ def func(a, x):
y = a[0] * anp.exp(-a[1] * x)
return y

data = RealData([o.value for o in ox], [o.value for o in oy], sx=[o.dvalue for o in ox], sy=[o.dvalue for o in oy])
model = Model(func)
odr = ODR(data, model, [0, 0], partol=np.finfo(np.float64).eps)
odr.set_job(fit_type=0, deriv=1)
output = odr.run()
# odrpack expects f(x, beta), but pyerrors convention is f(beta, x)
def wrapped_func(x, beta):
return func(beta, x)

output = odr_fit(
wrapped_func,
np.array([o.value for o in ox]),
np.array([o.value for o in oy]),
beta0=np.array([0.0, 0.0]),
weight_x=1.0 / np.array([o.dvalue for o in ox]) ** 2,
weight_y=1.0 / np.array([o.dvalue for o in oy]) ** 2,
partol=np.finfo(np.float64).eps,
task='explicit-ODR',
diff_scheme='central'
)

out = pe.total_least_squares(ox, oy, func, expected_chisquare=True)
beta = out.fit_parameters
@@ -1431,11 +1441,11 @@ def func(a, x):
global print_output, beta0
print_output = 1
if 'initial_guess' in kwargs:
beta0 = kwargs.get('initial_guess')
beta0 = np.asarray(kwargs.get('initial_guess'), dtype=np.float64)
if len(beta0) != n_parms:
raise Exception('Initial guess does not have the correct length.')
else:
beta0 = np.arange(n_parms)
beta0 = np.arange(n_parms, dtype=np.float64)

if len(x) != len(y):
raise Exception('x and y have to have the same length')
@@ -1463,23 +1473,45 @@ def do_the_fit(obs, **kwargs):

xerr = kwargs.get('xerr')

# odrpack expects f(x, beta), but pyerrors convention is f(beta, x)
def wrapped_func(x, beta):
return func(beta, x)

if length == len(obs):
assert 'x_constants' in kwargs
data = RealData(kwargs.get('x_constants'), obs, sy=yerr)
fit_type = 2
x_data = np.asarray(kwargs.get('x_constants'))
y_data = np.asarray(obs)
# Ordinary least squares (no x errors)
output = odr_fit(
wrapped_func,
x_data,
y_data,
beta0=beta0,
weight_y=1.0 / np.asarray(yerr) ** 2,
partol=np.finfo(np.float64).eps,
task='explicit-OLS',
diff_scheme='central'
)
elif length == len(obs) // 2:
data = RealData(obs[:length], obs[length:], sx=xerr, sy=yerr)
fit_type = 0
x_data = np.asarray(obs[:length])
y_data = np.asarray(obs[length:])
# ODR with x errors
output = odr_fit(
wrapped_func,
x_data,
y_data,
beta0=beta0,
weight_x=1.0 / np.asarray(xerr) ** 2,
weight_y=1.0 / np.asarray(yerr) ** 2,
partol=np.finfo(np.float64).eps,
task='explicit-ODR',
diff_scheme='central'
)
else:
raise Exception('x and y do not fit together.')

model = Model(func)

odr = ODR(data, model, beta0, partol=np.finfo(np.float64).eps)
odr.set_job(fit_type=fit_type, deriv=1)
output = odr.run()
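The branch above preserves both fitting modes of the removed scipy.odr job settings; the correspondence used in this diff is:

# scipy.odr set_job          odrpack odr_fit
# fit_type=0 (full ODR)  ->  task='explicit-ODR'  # errors on x and y
# fit_type=2 (OLS)       ->  task='explicit-OLS'  # x exact, errors on y only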
if print_output and not silent:
print(*output.stopreason)
print(output.stopreason)
print('chisquare/d.o.f.:', output.res_var)
print_output = 0
beta0 = output.beta