From c56d05a9a9843ac56b91cee234d09cf599d00ee4 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Fri, 27 Feb 2026 22:58:15 +0530 Subject: [PATCH 01/31] Fix #354: Add warnings when dimension is modified on NZF1, spmsrtls, and bearing - NZF1: Warn when n is not a multiple of 13 (adjusted to nearest multiple) - spmsrtls: Warn when n is adjusted due to minimum dimension requirement (n >= 100) - bearing: Warn when grid dimensions are adjusted to ensure nx > 0 and ny > 0 These warnings follow the pattern already established in dixmaan* problems. --- src/ADNLPProblems/NZF1.jl | 2 ++ src/ADNLPProblems/bearing.jl | 5 +++++ src/ADNLPProblems/spmsrtls.jl | 4 ++++ 3 files changed, 11 insertions(+) diff --git a/src/ADNLPProblems/NZF1.jl b/src/ADNLPProblems/NZF1.jl index e56c47ea0..bcb84c831 100644 --- a/src/ADNLPProblems/NZF1.jl +++ b/src/ADNLPProblems/NZF1.jl @@ -6,6 +6,7 @@ function NZF1(; use_nls::Bool = false, kwargs...) end function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + (n % 13 == 0) || @warn("NZF1: number of variables adjusted to be a multiple of 13") nbis = max(2, div(n, 13)) n = 13 * nbis l = div(n, 13) @@ -29,6 +30,7 @@ function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg end function NZF1(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} + (n % 13 == 0) || @warn("NZF1: number of variables adjusted to be a multiple of 13") nbis = max(2, div(n, 13)) n = 13 * nbis l = div(n, 13) diff --git a/src/ADNLPProblems/bearing.jl b/src/ADNLPProblems/bearing.jl index 40ab56ca9..d11d401fc 100644 --- a/src/ADNLPProblems/bearing.jl +++ b/src/ADNLPProblems/bearing.jl @@ -9,6 +9,11 @@ function bearing(; ) where {T} # nx > 0 # grid points in 1st direction # ny > 0 # grid points in 2nd direction + + # Ensure nx and ny are at least 1, and warn if they need adjustment + (nx > 0 && ny > 0) || @warn("bearing: grid dimensions adjusted to ensure nx > 0 and ny > 0") + nx = max(1, nx) + ny = max(1, ny) b = 10 # grid is (0,2*pi)x(0,2*b) e = 1 // 10 # eccentricity diff --git a/src/ADNLPProblems/spmsrtls.jl b/src/ADNLPProblems/spmsrtls.jl index d55161026..2a72f28ab 100644 --- a/src/ADNLPProblems/spmsrtls.jl +++ b/src/ADNLPProblems/spmsrtls.jl @@ -6,8 +6,10 @@ function spmsrtls(; use_nls::Bool = false, kwargs...) end function spmsrtls(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 + (n == n_orig) || @warn("spmsrtls: number of variables adjusted from $n_orig to $n") p = [sin(i^2) for i = 1:n] x0 = T[p[i] / 5 for i = 1:n] @@ -59,8 +61,10 @@ function spmsrtls(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function spmsrtls(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} + n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 + (n == n_orig) || @warn("spmsrtls: number of variables adjusted from $n_orig to $n") p = [sin(i^2) for i = 1:n] x0 = T[p[i] / 5 for i = 1:n] From d23f4596cfe51ed7d07cfdaecbe3a25565e28463 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sat, 28 Feb 2026 01:01:29 +0530 Subject: [PATCH 02/31] Update src/ADNLPProblems/bearing.jl Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/ADNLPProblems/bearing.jl | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/ADNLPProblems/bearing.jl b/src/ADNLPProblems/bearing.jl index d11d401fc..c241bc0c2 100644 --- a/src/ADNLPProblems/bearing.jl +++ b/src/ADNLPProblems/bearing.jl @@ -11,9 +11,20 @@ function bearing(; # ny > 0 # grid points in 2nd direction # Ensure nx and ny are at least 1, and warn if they need adjustment - (nx > 0 && ny > 0) || @warn("bearing: grid dimensions adjusted to ensure nx > 0 and ny > 0") + nx_orig = nx + ny_orig = ny nx = max(1, nx) ny = max(1, ny) + if nx != nx_orig || ny != ny_orig + msg_parts = String[] + if nx != nx_orig + push!(msg_parts, "nx from $(nx_orig) to $(nx)") + end + if ny != ny_orig + push!(msg_parts, "ny from $(ny_orig) to $(ny)") + end + @warn("bearing: grid dimensions adjusted: " * join(msg_parts, ", ")) + end b = 10 # grid is (0,2*pi)x(0,2*b) e = 1 // 10 # eccentricity From 7222fc4272781ba40c87b8dd50c83a7d55aeb450 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sat, 28 Feb 2026 01:04:45 +0530 Subject: [PATCH 03/31] n%13 removal --- src/ADNLPProblems/NZF1.jl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/ADNLPProblems/NZF1.jl b/src/ADNLPProblems/NZF1.jl index bcb84c831..3bf813f58 100644 --- a/src/ADNLPProblems/NZF1.jl +++ b/src/ADNLPProblems/NZF1.jl @@ -6,9 +6,10 @@ function NZF1(; use_nls::Bool = false, kwargs...) end function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - (n % 13 == 0) || @warn("NZF1: number of variables adjusted to be a multiple of 13") + n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis + (n == n_orig) || @warn("NZF1: number of variables adjusted from $n_orig to $n") l = div(n, 13) function f(x; l = l) return sum( @@ -30,9 +31,10 @@ function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg end function NZF1(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - (n % 13 == 0) || @warn("NZF1: number of variables adjusted to be a multiple of 13") + n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis + (n == n_orig) || @warn("NZF1: number of variables adjusted from $n_orig to $n") l = div(n, 13) function F!(r, x; l = l) for i = 1:l From 039a60c8fef1faa92827d23b559bf3d08c64080f Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sat, 28 Feb 2026 01:11:03 +0530 Subject: [PATCH 04/31] Add @adjust_nvar_warn macro and update all dimension adjustment warnings - Created @adjust_nvar_warn macro in ADNLPProblems module to standardize dimension adjustment warnings across all problems - Updated warning messages to consistently show both original and adjusted values following pattern: 'problem_name: number of variables adjusted from {n_orig} to {n}' - Applied macro to all problems with dimension adjustments: - NZF1 (multiple of 13) - spmsrtls (adjusted formula) - chainwoo (multiple of 4, both :nlp and :nls variants) - woods (multiple of 4) - srosenbr (multiple of 2) - catenary (multiple of 3, minimum 6) - clplatea, clplateb, clplatec (perfect squares) - fminsrf2 (minimum 4, then perfect square) - powellsg (multiple of 4, both :nlp and :nls variants) - watson (clamped between 2 and 31, both :nlp and :nls variants) Addresses issue #354 --- src/ADNLPProblems/ADNLPProblems.jl | 20 ++++++++++++++++++++ src/ADNLPProblems/NZF1.jl | 4 ++-- src/ADNLPProblems/catenary.jl | 4 ++-- src/ADNLPProblems/chainwoo.jl | 6 ++++-- src/ADNLPProblems/clplatea.jl | 3 ++- 
src/ADNLPProblems/clplateb.jl | 3 ++- src/ADNLPProblems/clplatec.jl | 3 ++- src/ADNLPProblems/fminsrf2.jl | 5 ++--- src/ADNLPProblems/powellsg.jl | 5 +++-- src/ADNLPProblems/spmsrtls.jl | 4 ++-- src/ADNLPProblems/srosenbr.jl | 3 ++- src/ADNLPProblems/watson.jl | 4 ++++ src/ADNLPProblems/woods.jl | 3 ++- 13 files changed, 49 insertions(+), 18 deletions(-) diff --git a/src/ADNLPProblems/ADNLPProblems.jl b/src/ADNLPProblems/ADNLPProblems.jl index 824468a1f..52a0e0f5f 100644 --- a/src/ADNLPProblems/ADNLPProblems.jl +++ b/src/ADNLPProblems/ADNLPProblems.jl @@ -26,6 +26,26 @@ end @require ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a" begin using JLD2, LinearAlgebra, SparseArrays, SpecialFunctions + """ + @adjust_nvar_warn(problem_name, n_orig, n) + + Issue a warning if the number of variables was adjusted, showing both original and adjusted values. + This macro provides consistent warning messages across all problems with dimension adjustments. + + # Example + ```julia + n_orig = n + n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("woods", n_orig, n) + ``` + """ + macro adjust_nvar_warn(problem_name, n_orig, n) + return quote + ($(esc(n)) == $(esc(n_orig))) || + @warn($(esc(problem_name)) * ": number of variables adjusted from " * string($(esc(n_orig))) * " to " * string($(esc(n)))) + end + end + path = dirname(@__FILE__) files = filter(x -> x[(end - 2):end] == ".jl", readdir(path)) for file in files diff --git a/src/ADNLPProblems/NZF1.jl b/src/ADNLPProblems/NZF1.jl index 3bf813f58..5edd35f6f 100644 --- a/src/ADNLPProblems/NZF1.jl +++ b/src/ADNLPProblems/NZF1.jl @@ -9,7 +9,7 @@ function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis - (n == n_orig) || @warn("NZF1: number of variables adjusted from $n_orig to $n") + @adjust_nvar_warn("NZF1", n_orig, n) l = div(n, 13) function f(x; l = l) return sum( @@ -34,7 +34,7 @@ function NZF1(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, 
kwarg n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis - (n == n_orig) || @warn("NZF1: number of variables adjusted from $n_orig to $n") + @adjust_nvar_warn("NZF1", n_orig, n) l = div(n, 13) function F!(r, x; l = l) for i = 1:l diff --git a/src/ADNLPProblems/catenary.jl b/src/ADNLPProblems/catenary.jl index 0c4756f65..345a0bdc3 100644 --- a/src/ADNLPProblems/catenary.jl +++ b/src/ADNLPProblems/catenary.jl @@ -8,10 +8,10 @@ function catenary( FRACT = 0.6, kwargs..., ) where {T} - (n % 3 == 0) || @warn("catenary: number of variables adjusted to be a multiple of 3") + n_orig = n n = 3 * max(1, div(n, 3)) - (n < 6) || @warn("catenary: number of variables adjusted to be greater or equal to 6") n = max(n, 6) + @adjust_nvar_warn("catenary", n_orig, n) ## Model Parameters N = div(n, 3) - 2 diff --git a/src/ADNLPProblems/chainwoo.jl b/src/ADNLPProblems/chainwoo.jl index 9d1520fe8..154b71ba8 100644 --- a/src/ADNLPProblems/chainwoo.jl +++ b/src/ADNLPProblems/chainwoo.jl @@ -6,8 +6,9 @@ function chainwoo(; use_nls::Bool = false, kwargs...) end function chainwoo(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - (n % 4 == 0) || @warn("chainwoo: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("chainwoo", n_orig, n) function f(x; n = length(x)) return 1 + sum( 100 * (x[2 * i] - x[2 * i - 1]^2)^2 + @@ -23,8 +24,9 @@ function chainwoo(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function chainwoo(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - (n % 4 == 0) || @warn("chainwoo: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("chainwoo", n_orig, n) function F!(r, x; n = length(x)) nb = div(n, 2) - 1 r[1] = 1 diff --git a/src/ADNLPProblems/clplatea.jl b/src/ADNLPProblems/clplatea.jl index a54e08ade..7174be032 100644 --- a/src/ADNLPProblems/clplatea.jl +++ b/src/ADNLPProblems/clplatea.jl @@ -6,9 +6,10 @@ function clplatea(; wght = -0.1, kwargs..., ) where {T} + n_orig = n p = max(floor(Int, sqrt(n)), 3) - p * p != n && @warn("clplatea: number of variables adjusted from $n to $(p*p)") n = p * p + @adjust_nvar_warn("clplatea", n_orig, n) hp2 = (1 // 2) * p^2 function f(x; p = p, hp2 = hp2, wght = wght) return (eltype(x)(wght) * x[p + (p - 1) * p]) + diff --git a/src/ADNLPProblems/clplateb.jl b/src/ADNLPProblems/clplateb.jl index a732e4311..f1fbc0120 100644 --- a/src/ADNLPProblems/clplateb.jl +++ b/src/ADNLPProblems/clplateb.jl @@ -6,9 +6,10 @@ function clplateb(; wght = -0.1, kwargs..., ) where {T} + n_orig = n p = max(floor(Int, sqrt(n)), 3) - p * p != n && @warn("clplateb: number of variables adjusted from $n to $(p*p)") n = p * p + @adjust_nvar_warn("clplateb", n_orig, n) hp2 = 1 // 2 * p^2 function f(x; p = p, hp2 = hp2, wght = wght) return sum(eltype(x)(wght) / (p - 1) * x[p + (j - 1) * p] for j = 1:p) + diff --git a/src/ADNLPProblems/clplatec.jl b/src/ADNLPProblems/clplatec.jl index 5d77a3f36..43d73eae4 100644 --- a/src/ADNLPProblems/clplatec.jl +++ b/src/ADNLPProblems/clplatec.jl @@ -8,9 +8,10 @@ function clplatec(; l = 0.01, kwargs..., ) where {T} + n_orig = n p = max(floor(Int, sqrt(n)), 3) - p * p != n && @warn("clplatec: number of variables adjusted from $n to $(p*p)") n = p * p + @adjust_nvar_warn("clplatec", n_orig, n) hp2 = 1 // 2 * p^2 function f(x; p = p, hp2 = hp2, wght = wght, r = r, l = l) diff --git a/src/ADNLPProblems/fminsrf2.jl b/src/ADNLPProblems/fminsrf2.jl index c7fe806ea..eae7709ba 100644 --- 
a/src/ADNLPProblems/fminsrf2.jl +++ b/src/ADNLPProblems/fminsrf2.jl @@ -1,12 +1,11 @@ export fminsrf2 function fminsrf2(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n < 4 && @warn("fminsrf2: number of variables must be ≥ 4") + n_orig = n n = max(4, n) - p = floor(Int, sqrt(n)) - p * p != n && @warn("fminsrf2: number of variables adjusted from $n down to $(p*p)") n = p * p + @adjust_nvar_warn("fminsrf2", n_orig, n) h00 = 1 slopej = 4 diff --git a/src/ADNLPProblems/powellsg.jl b/src/ADNLPProblems/powellsg.jl index 38ac7db65..b64333d07 100644 --- a/src/ADNLPProblems/powellsg.jl +++ b/src/ADNLPProblems/powellsg.jl @@ -6,8 +6,9 @@ function powellsg(; use_nls::Bool = false, kwargs...) end function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - (n % 4 == 0) || @warn("powellsg: number of variables adjusted to be a multiple of 4") - n = 4 * max(1, div(n, 4)) # number of variables adjusted to be a multiple of 4 + n_orig = n + n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("powellsg", n_orig, n) function f(x; n = length(x)) return sum( (x[j] + 10 * x[j + 1])^2 + diff --git a/src/ADNLPProblems/spmsrtls.jl b/src/ADNLPProblems/spmsrtls.jl index 2a72f28ab..8cbc967cf 100644 --- a/src/ADNLPProblems/spmsrtls.jl +++ b/src/ADNLPProblems/spmsrtls.jl @@ -9,7 +9,7 @@ function spmsrtls(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 - (n == n_orig) || @warn("spmsrtls: number of variables adjusted from $n_orig to $n") + @adjust_nvar_warn("spmsrtls", n_orig, n) p = [sin(i^2) for i = 1:n] x0 = T[p[i] / 5 for i = 1:n] @@ -64,7 +64,7 @@ function spmsrtls(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, k n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 - (n == n_orig) || @warn("spmsrtls: number of variables adjusted from $n_orig to $n") + @adjust_nvar_warn("spmsrtls", n_orig, n) p = [sin(i^2) for i = 1:n] x0 = T[p[i] / 5 
for i = 1:n] diff --git a/src/ADNLPProblems/srosenbr.jl b/src/ADNLPProblems/srosenbr.jl index 451d0a6e7..8c51d9e64 100644 --- a/src/ADNLPProblems/srosenbr.jl +++ b/src/ADNLPProblems/srosenbr.jl @@ -1,8 +1,9 @@ export srosenbr function srosenbr(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - (n % 2 == 0) || @warn("srosenbr: number of variables adjusted to be even") + n_orig = n n = 2 * max(1, div(n, 2)) + @adjust_nvar_warn("srosenbr", n_orig, n) function f(x; n = length(x)) return sum(100 * (x[2 * i] - x[2 * i - 1]^2)^2 + (x[2 * i - 1] - 1)^2 for i = 1:div(n, 2)) end diff --git a/src/ADNLPProblems/watson.jl b/src/ADNLPProblems/watson.jl index 56e1f2757..cf75edc30 100644 --- a/src/ADNLPProblems/watson.jl +++ b/src/ADNLPProblems/watson.jl @@ -6,7 +6,9 @@ function watson(; use_nls::Bool = false, kwargs...) end function watson(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n n = min(max(n, 2), 31) + @adjust_nvar_warn("watson", n_orig, n) function f(x; n = n) Ti = eltype(x) return 1 // 2 * sum( @@ -31,7 +33,9 @@ function watson(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa end function watson(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n n = min(max(n, 2), 31) + @adjust_nvar_warn("watson", n_orig, n) function F!(r, x; n = n) Ti = eltype(x) for i = 1:29 diff --git a/src/ADNLPProblems/woods.jl b/src/ADNLPProblems/woods.jl index 426166561..575630098 100644 --- a/src/ADNLPProblems/woods.jl +++ b/src/ADNLPProblems/woods.jl @@ -1,8 +1,9 @@ export woods function woods(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - (n % 4 == 0) || @warn("woods: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("woods", n_orig, n) function f(x; n = length(x)) return sum( 100 * (x[4 * i - 2] - x[4 * i - 3]^2)^2 + From 4d99e6de4ceac0b93e744f4056a85cbdd03a4d66 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sat, 28 Feb 2026 01:36:44 +0530 Subject: [PATCH 05/31] Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/ADNLPProblems/ADNLPProblems.jl | 3 ++- src/ADNLPProblems/bearing.jl | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/ADNLPProblems/ADNLPProblems.jl b/src/ADNLPProblems/ADNLPProblems.jl index 52a0e0f5f..69e0a9d12 100644 --- a/src/ADNLPProblems/ADNLPProblems.jl +++ b/src/ADNLPProblems/ADNLPProblems.jl @@ -42,7 +42,8 @@ end macro adjust_nvar_warn(problem_name, n_orig, n) return quote ($(esc(n)) == $(esc(n_orig))) || - @warn($(esc(problem_name)) * ": number of variables adjusted from " * string($(esc(n_orig))) * " to " * string($(esc(n)))) + @warn(string($(esc(problem_name)), ": number of variables adjusted from ", + $(esc(n_orig)), " to ", $(esc(n)))) end end diff --git a/src/ADNLPProblems/bearing.jl b/src/ADNLPProblems/bearing.jl index c241bc0c2..bccc33a62 100644 --- a/src/ADNLPProblems/bearing.jl +++ b/src/ADNLPProblems/bearing.jl @@ -9,7 +9,7 @@ function bearing(; ) where {T} # nx > 0 # grid points in 1st direction # ny > 0 # grid points in 2nd direction - + # Ensure nx and ny are at least 1, and warn if they need adjustment nx_orig = nx ny_orig = ny From a9c3d38a15283251480aad51fc4d10a769673f84 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sat, 28 Feb 2026 01:36:58 +0530 Subject: [PATCH 06/31] Update powellsg :nls variant to use @adjust_nvar_warn macro Ensures consistent warning messages between :nlp and :nls variants, showing both original and adjusted dimension values. 
--- src/ADNLPProblems/powellsg.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/ADNLPProblems/powellsg.jl b/src/ADNLPProblems/powellsg.jl index b64333d07..e95cc1d55 100644 --- a/src/ADNLPProblems/powellsg.jl +++ b/src/ADNLPProblems/powellsg.jl @@ -25,8 +25,9 @@ function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function powellsg(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - (n % 4 == 0) || @warn("powellsg: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("powellsg", n_orig, n) function F!(r, x; n = length(x)) @inbounds for j = 1:4:n r[j] = x[j] + 10 * x[j + 1] From 0b0178024e8c17219c5e0c6eda36bafdb1447905 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Tue, 7 Apr 2026 19:09:41 +0530 Subject: [PATCH 07/31] PureJuMP implementations --- src/ADNLPProblems/bearing.jl | 16 +++------------- src/PureJuMP/NZF1.jl | 3 ++- src/PureJuMP/PureJuMP.jl | 8 ++++++++ src/PureJuMP/broydn7d.jl | 3 ++- src/PureJuMP/catenary.jl | 4 ++-- src/PureJuMP/chainwoo.jl | 3 ++- src/PureJuMP/clplatea.jl | 3 ++- src/PureJuMP/clplateb.jl | 3 ++- src/PureJuMP/clplatec.jl | 3 ++- src/PureJuMP/dixmaan_efgh.jl | 3 ++- src/PureJuMP/dixmaan_ijkl.jl | 3 ++- src/PureJuMP/dixmaan_mnop.jl | 3 ++- src/PureJuMP/fminsrf2.jl | 4 ++-- src/PureJuMP/powellsg.jl | 3 ++- src/PureJuMP/srosenbr.jl | 3 ++- src/PureJuMP/woods.jl | 3 ++- 16 files changed, 39 insertions(+), 29 deletions(-) diff --git a/src/ADNLPProblems/bearing.jl b/src/ADNLPProblems/bearing.jl index bccc33a62..238d07e30 100644 --- a/src/ADNLPProblems/bearing.jl +++ b/src/ADNLPProblems/bearing.jl @@ -10,21 +10,11 @@ function bearing(; # nx > 0 # grid points in 1st direction # ny > 0 # grid points in 2nd direction - # Ensure nx and ny are at least 1, and warn if they need adjustment - nx_orig = nx - ny_orig = ny + n_orig = n nx = max(1, nx) ny = max(1, ny) - if nx != nx_orig || ny != ny_orig - 
msg_parts = String[] - if nx != nx_orig - push!(msg_parts, "nx from $(nx_orig) to $(nx)") - end - if ny != ny_orig - push!(msg_parts, "ny from $(ny_orig) to $(ny)") - end - @warn("bearing: grid dimensions adjusted: " * join(msg_parts, ", ")) - end + n = (nx + 2) * (ny + 2) + @adjust_nvar_warn("bearing", n_orig, n) b = 10 # grid is (0,2*pi)x(0,2*b) e = 1 // 10 # eccentricity diff --git a/src/PureJuMP/NZF1.jl b/src/PureJuMP/NZF1.jl index f0eb746c5..886bfc857 100644 --- a/src/PureJuMP/NZF1.jl +++ b/src/PureJuMP/NZF1.jl @@ -7,9 +7,10 @@ export NZF1 function NZF1(args...; n::Int = default_nvar, kwargs...) - mod(n, 13) != 0 && @warn("NZF1: number of variables adjusted to be divisible by 13 and ≥ 26") + n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis + @adjust_nvar_warn("NZF1", n_orig, n) l = div(n, 13) diff --git a/src/PureJuMP/PureJuMP.jl b/src/PureJuMP/PureJuMP.jl index a855ee252..d24093b97 100644 --- a/src/PureJuMP/PureJuMP.jl +++ b/src/PureJuMP/PureJuMP.jl @@ -20,6 +20,14 @@ end using JuMP, LinearAlgebra, SpecialFunctions +macro adjust_nvar_warn(problem_name, n_orig, n) + return quote + ($(esc(n)) == $(esc(n_orig))) || + @warn(string($(esc(problem_name)), ": number of variables adjusted from ", + $(esc(n_orig)), " to ", $(esc(n)))) + end +end + path = dirname(@__FILE__) files = filter(x -> x[(end - 2):end] == ".jl", readdir(path)) for file in files diff --git a/src/PureJuMP/broydn7d.jl b/src/PureJuMP/broydn7d.jl index 5778efbcc..eb24cfc2f 100644 --- a/src/PureJuMP/broydn7d.jl +++ b/src/PureJuMP/broydn7d.jl @@ -46,9 +46,10 @@ export broydn7d "Broyden 7-diagonal model in size `n`" function broydn7d(args...; n::Int = default_nvar, p::Float64 = 7 / 3, kwargs...) 
- mod(n, 2) > 0 && @warn("broydn7d: number of variables adjusted to be even") + n_orig = n n2 = max(1, div(n, 2)) n = 2 * n2 + @adjust_nvar_warn("broydn7d", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/catenary.jl b/src/PureJuMP/catenary.jl index 4da81b57f..6be4300b3 100644 --- a/src/PureJuMP/catenary.jl +++ b/src/PureJuMP/catenary.jl @@ -17,10 +17,10 @@ export catenary function catenary(args...; n::Int = default_nvar, Bl = 1.0, FRACT = 0.6, kwargs...) - (n % 3 == 0) || @warn("catenary: number of variables adjusted to be a multiple of 3") + n_orig = n n = 3 * max(1, div(n, 3)) - (n < 6) || @warn("catenary: number of variables adjusted to be greater or equal to 6") n = max(n, 6) + @adjust_nvar_warn("catenary", n_orig, n) ## Model Parameters diff --git a/src/PureJuMP/chainwoo.jl b/src/PureJuMP/chainwoo.jl index 0fc8cb893..7d0112043 100644 --- a/src/PureJuMP/chainwoo.jl +++ b/src/PureJuMP/chainwoo.jl @@ -35,8 +35,9 @@ export chainwoo "The chained Woods function in size `n`, a variant on the Woods function" function chainwoo(args...; n::Int = default_nvar, kwargs...) - (n % 4 == 0) || @warn("chainwoo: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("chainwoo", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/clplatea.jl b/src/PureJuMP/clplatea.jl index 011e0db9a..a12947dce 100644 --- a/src/PureJuMP/clplatea.jl +++ b/src/PureJuMP/clplatea.jl @@ -26,9 +26,10 @@ export clplatea "The clamped plate problem (Strang, Nocedal, Dax)." function clplatea(args...; n::Int = default_nvar, wght::Float64 = -0.1, kwargs...) 
+ n_orig = n p = floor(Int, sqrt(n)) - p * p != n && @warn("clplatea: number of variables adjusted from $n down to $(p*p)") n = p * p + @adjust_nvar_warn("clplatea", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/clplateb.jl b/src/PureJuMP/clplateb.jl index 575e9fe5d..ec6315f17 100644 --- a/src/PureJuMP/clplateb.jl +++ b/src/PureJuMP/clplateb.jl @@ -27,9 +27,10 @@ export clplateb "The clamped plate problem (Strang, Nocedal, Dax)." function clplateb(args...; n::Int = default_nvar, wght::Float64 = -0.1, kwargs...) + n_orig = n p = floor(Int, sqrt(n)) - p * p != n && @warn("clplateb: number of variables adjusted from $n down to $(p*p)") n = p * p + @adjust_nvar_warn("clplateb", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/clplatec.jl b/src/PureJuMP/clplatec.jl index 274feda44..e85f9686d 100644 --- a/src/PureJuMP/clplatec.jl +++ b/src/PureJuMP/clplatec.jl @@ -33,9 +33,10 @@ function clplatec( l::Float64 = 0.01, kwargs..., ) + n_orig = n p = floor(Int, sqrt(n)) - p * p != n && @warn("clplatec: number of variables adjusted from $n down to $(p*p)") n = p * p + @adjust_nvar_warn("clplatec", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_efgh.jl b/src/PureJuMP/dixmaan_efgh.jl index 39e2c5976..47c5b5f2e 100644 --- a/src/PureJuMP/dixmaan_efgh.jl +++ b/src/PureJuMP/dixmaan_efgh.jl @@ -33,9 +33,10 @@ function dixmaane( δ::Float64 = 0.125, kwargs..., ) - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaan", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_ijkl.jl b/src/PureJuMP/dixmaan_ijkl.jl index 11f291714..004e62f2e 100644 --- a/src/PureJuMP/dixmaan_ijkl.jl +++ b/src/PureJuMP/dixmaan_ijkl.jl @@ -33,9 +33,10 @@ function dixmaani( δ::Float64 = 0.125, kwargs..., ) - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaan", n_orig, n) nlp = 
Model() diff --git a/src/PureJuMP/dixmaan_mnop.jl b/src/PureJuMP/dixmaan_mnop.jl index c4537bcd5..4ac9bf7c4 100644 --- a/src/PureJuMP/dixmaan_mnop.jl +++ b/src/PureJuMP/dixmaan_mnop.jl @@ -31,9 +31,10 @@ function dixmaanm( δ::Float64 = 0.125, kwargs..., ) - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaan", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/fminsrf2.jl b/src/PureJuMP/fminsrf2.jl index b6acb5a8f..d73c4c630 100644 --- a/src/PureJuMP/fminsrf2.jl +++ b/src/PureJuMP/fminsrf2.jl @@ -21,12 +21,12 @@ export fminsrf2 function fminsrf2(args...; n::Int = default_nvar, kwargs...) - n < 4 && @warn("fminsrf2: number of variables must be ≥ 4") + n_orig = n n = max(4, n) p = floor(Int, sqrt(n)) - p * p != n && @warn("fminsrf2: number of variables adjusted from $n down to $(p*p)") n = p * p + @adjust_nvar_warn("fminsrf2", n_orig, n) h00 = 1.0 slopej = 4.0 diff --git a/src/PureJuMP/powellsg.jl b/src/PureJuMP/powellsg.jl index bcb9f7deb..e7ecdd2a5 100644 --- a/src/PureJuMP/powellsg.jl +++ b/src/PureJuMP/powellsg.jl @@ -37,8 +37,9 @@ export powellsg "The extended Powell singular problem in size 'n' " function powellsg(args...; n::Int = default_nvar, kwargs...) - (n % 4 == 0) || @warn("powellsg: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("powellsg", n_orig, n) x0 = zeros(n) x0[4 * (collect(1:div(n, 4))) .- 3] .= 3.0 diff --git a/src/PureJuMP/srosenbr.jl b/src/PureJuMP/srosenbr.jl index fc971e61d..1381f38c0 100644 --- a/src/PureJuMP/srosenbr.jl +++ b/src/PureJuMP/srosenbr.jl @@ -21,8 +21,9 @@ export srosenbr "The separable extension of Rosenbrock's function 'n' " function srosenbr(args...; n::Int = default_nvar, kwargs...) 
- (n % 2 == 0) || @warn("srosenbr: number of variables adjusted to be even") + n_orig = n n = 2 * max(1, div(n, 2)) + @adjust_nvar_warn("srosenbr", n_orig, n) x0 = ones(n) x0[2 * (collect(1:div(n, 2))) .- 1] .= -1.2 diff --git a/src/PureJuMP/woods.jl b/src/PureJuMP/woods.jl index 8127b51ab..c9f5b8938 100644 --- a/src/PureJuMP/woods.jl +++ b/src/PureJuMP/woods.jl @@ -39,8 +39,9 @@ export woods "The extended Woods problem `n` " function woods(args...; n::Int = default_nvar, kwargs...) - (n % 4 == 0) || @warn("woods: number of variables adjusted to be a multiple of 4") + n_orig = n n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("woods", n_orig, n) nlp = Model() From 106ad0b6f84df20ae98286585210dc0af44bccdb Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sat, 18 Apr 2026 21:40:29 +0530 Subject: [PATCH 08/31] Update src/PureJuMP/PureJuMP.jl Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/PureJuMP/PureJuMP.jl | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/PureJuMP/PureJuMP.jl b/src/PureJuMP/PureJuMP.jl index d24093b97..0c5892e32 100644 --- a/src/PureJuMP/PureJuMP.jl +++ b/src/PureJuMP/PureJuMP.jl @@ -20,11 +20,14 @@ end using JuMP, LinearAlgebra, SpecialFunctions +_adjust_nvar_warn_message(problem_name, n_orig, n) = + string(problem_name, ": number of variables adjusted from ", n_orig, " to ", n) + macro adjust_nvar_warn(problem_name, n_orig, n) return quote - ($(esc(n)) == $(esc(n_orig))) || - @warn(string($(esc(problem_name)), ": number of variables adjusted from ", - $(esc(n_orig)), " to ", $(esc(n)))) + local _n_orig = $(esc(n_orig)) + local _n = $(esc(n)) + (_n == _n_orig) || @warn(_adjust_nvar_warn_message($(esc(problem_name)), _n_orig, _n)) end end From f6c9e0ae2e4e88bda00965aea9df5ddb5f3d0242 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sat, 18 Apr 2026 22:10:29 +0530 Subject: [PATCH 09/31] Add regression tests for dimension-adjustment warnings --- src/PureJuMP/PureJuMP.jl | 5 ++ 
src/PureJuMP/bearing.jl | 6 ++ test/test-defined-problems.jl | 114 ++++++++++++++++++++++++++++++++++ 3 files changed, 125 insertions(+) diff --git a/src/PureJuMP/PureJuMP.jl b/src/PureJuMP/PureJuMP.jl index 0c5892e32..a52f52b39 100644 --- a/src/PureJuMP/PureJuMP.jl +++ b/src/PureJuMP/PureJuMP.jl @@ -23,6 +23,11 @@ using JuMP, LinearAlgebra, SpecialFunctions _adjust_nvar_warn_message(problem_name, n_orig, n) = string(problem_name, ": number of variables adjusted from ", n_orig, " to ", n) +""" + @adjust_nvar_warn(problem_name, n_orig, n) + +Issue a warning if the number of variables was adjusted, showing both original and adjusted values. +""" macro adjust_nvar_warn(problem_name, n_orig, n) return quote local _n_orig = $(esc(n_orig)) diff --git a/src/PureJuMP/bearing.jl b/src/PureJuMP/bearing.jl index 86a3d47bc..442de6250 100644 --- a/src/PureJuMP/bearing.jl +++ b/src/PureJuMP/bearing.jl @@ -28,6 +28,12 @@ function bearing( # nx > 0 # grid points in 1st direction # ny > 0 # grid points in 2nd direction + n_orig = n + nx = max(1, nx) + ny = max(1, ny) + n = (nx + 2) * (ny + 2) + @adjust_nvar_warn("bearing", n_orig, n) + b = 10 # grid is (0,2*pi)x(0,2*b) e = 0.1 # eccentricity diff --git a/test/test-defined-problems.jl b/test/test-defined-problems.jl index 0007a346b..9da369935 100644 --- a/test/test-defined-problems.jl +++ b/test/test-defined-problems.jl @@ -22,6 +22,120 @@ probes = @sync begin end @info "PureJuMP missing per worker" probes +function _check_adjusted_warning(ctor, expected_msg::AbstractString, expected_nvar::Integer) + nlp = @test_logs (:warn, expected_msg) ctor() + @test nlp.meta.nvar == expected_nvar +end + +@testset "Adjusted dimension warnings" begin + _check_adjusted_warning("NZF1: number of variables adjusted from 1 to 26", 26) do + ADNLPProblems.NZF1(n = 1) + end + _check_adjusted_warning("NZF1: number of variables adjusted from 1 to 26", 26) do + MathOptNLPModel(PureJuMP.NZF1(n = 1)) + end + + _check_adjusted_warning("spmsrtls: number of 
variables adjusted from 99 to 100", 100) do + ADNLPProblems.spmsrtls(n = 99) + end + + _check_adjusted_warning("chainwoo: number of variables adjusted from 1 to 4", 4) do + ADNLPProblems.chainwoo(n = 1) + end + _check_adjusted_warning("chainwoo: number of variables adjusted from 1 to 4", 4) do + MathOptNLPModel(PureJuMP.chainwoo(n = 1)) + end + + _check_adjusted_warning("catenary: number of variables adjusted from 10 to 9", 9) do + ADNLPProblems.catenary(n = 10) + end + _check_adjusted_warning("catenary: number of variables adjusted from 10 to 9", 9) do + MathOptNLPModel(PureJuMP.catenary(n = 10)) + end + + _check_adjusted_warning("clplatea: number of variables adjusted from 5 to 9", 9) do + ADNLPProblems.clplatea(n = 5) + end + _check_adjusted_warning("clplatea: number of variables adjusted from 5 to 4", 4) do + MathOptNLPModel(PureJuMP.clplatea(n = 5)) + end + _check_adjusted_warning("clplateb: number of variables adjusted from 5 to 4", 4) do + MathOptNLPModel(PureJuMP.clplateb(n = 5)) + end + _check_adjusted_warning("clplatec: number of variables adjusted from 5 to 4", 4) do + MathOptNLPModel(PureJuMP.clplatec(n = 5)) + end + + _check_adjusted_warning("fminsrf2: number of variables adjusted from 1 to 4", 4) do + ADNLPProblems.fminsrf2(n = 1) + end + _check_adjusted_warning("fminsrf2: number of variables adjusted from 1 to 4", 4) do + MathOptNLPModel(PureJuMP.fminsrf2(n = 1)) + end + + _check_adjusted_warning("powellsg: number of variables adjusted from 1 to 4", 4) do + ADNLPProblems.powellsg(n = 1) + end + _check_adjusted_warning("powellsg: number of variables adjusted from 1 to 4", 4) do + ADNLPProblems.powellsg(use_nls = true, n = 1) + end + _check_adjusted_warning("powellsg: number of variables adjusted from 1 to 4", 4) do + MathOptNLPModel(PureJuMP.powellsg(n = 1)) + end + _check_adjusted_warning("powellsg: number of variables adjusted from 1 to 4", 4) do + MathOptNLPModel(PureJuMP.powellsg(use_nls = true, n = 1)) + end + + _check_adjusted_warning("srosenbr: 
number of variables adjusted from 1 to 2", 2) do + ADNLPProblems.srosenbr(n = 1) + end + _check_adjusted_warning("srosenbr: number of variables adjusted from 1 to 2", 2) do + MathOptNLPModel(PureJuMP.srosenbr(n = 1)) + end + + _check_adjusted_warning("watson: number of variables adjusted from 1 to 2", 2) do + ADNLPProblems.watson(n = 1) + end + _check_adjusted_warning("watson: number of variables adjusted from 1 to 2", 2) do + ADNLPProblems.watson(use_nls = true, n = 1) + end + + _check_adjusted_warning("woods: number of variables adjusted from 1 to 4", 4) do + ADNLPProblems.woods(n = 1) + end + _check_adjusted_warning("woods: number of variables adjusted from 1 to 4", 4) do + MathOptNLPModel(PureJuMP.woods(n = 1)) + end + + _check_adjusted_warning("bearing: number of variables adjusted from 1 to 9", 9) do + ADNLPProblems.bearing(n = 1) + end + _check_adjusted_warning("bearing: number of variables adjusted from 1 to 9", 9) do + MathOptNLPModel(PureJuMP.bearing(n = 1)) + end + + _check_adjusted_warning("broydn7d: number of variables adjusted from 1 to 2", 2) do + MathOptNLPModel(PureJuMP.broydn7d(n = 1)) + end + + _check_adjusted_warning("dixmaan: number of variables adjusted from 1 to 3", 3) do + MathOptNLPModel(PureJuMP.dixmaane(n = 1)) + end + _check_adjusted_warning("dixmaan: number of variables adjusted from 1 to 3", 3) do + MathOptNLPModel(PureJuMP.dixmaani(n = 1)) + end + _check_adjusted_warning("dixmaan: number of variables adjusted from 1 to 3", 3) do + MathOptNLPModel(PureJuMP.dixmaanm(n = 1)) + end + + _check_adjusted_warning("spmsrtls: number of variables adjusted from 99 to 100", 100) do + ADNLPProblems.spmsrtls(use_nls = true, n = 99) + end + _check_adjusted_warning("NZF1: number of variables adjusted from 1 to 26", 26) do + ADNLPProblems.NZF1(use_nls = true, n = 1) + end +end + @test setdiff(union(names(ADNLPProblems), list_problems_not_ADNLPProblems), list_problems) == [:ADNLPProblems] @test setdiff(union(names(PureJuMP), list_problems_not_PureJuMP), 
list_problems) == [:PureJuMP] From 4af9763becf2c7e4a80e0be950c362ec9fab0232 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sun, 19 Apr 2026 01:19:56 +0530 Subject: [PATCH 10/31] unifying macros for defining problems and dimensions, and added warnings for dimension mismatches in the defined problems. --- src/ADNLPProblems/ADNLPProblems.jl | 22 +--------------------- src/OptimizationProblems.jl | 16 ++++++++++++++++ src/PureJuMP/PureJuMP.jl | 17 +---------------- test/test-defined-problems.jl | 6 +++++- 4 files changed, 23 insertions(+), 38 deletions(-) diff --git a/src/ADNLPProblems/ADNLPProblems.jl b/src/ADNLPProblems/ADNLPProblems.jl index 69e0a9d12..23c40a033 100644 --- a/src/ADNLPProblems/ADNLPProblems.jl +++ b/src/ADNLPProblems/ADNLPProblems.jl @@ -1,6 +1,7 @@ module ADNLPProblems using Requires +import ..OptimizationProblems: @adjust_nvar_warn const default_nvar = 100 const data_path = joinpath(@__DIR__, "..", "..", "data") @@ -26,27 +27,6 @@ end @require ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a" begin using JLD2, LinearAlgebra, SparseArrays, SpecialFunctions - """ - @adjust_nvar_warn(problem_name, n_orig, n) - - Issue a warning if the number of variables was adjusted, showing both original and adjusted values. - This macro provides consistent warning messages across all problems with dimension adjustments. 
- - # Example - ```julia - n_orig = n - n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("woods", n_orig, n) - ``` - """ - macro adjust_nvar_warn(problem_name, n_orig, n) - return quote - ($(esc(n)) == $(esc(n_orig))) || - @warn(string($(esc(problem_name)), ": number of variables adjusted from ", - $(esc(n_orig)), " to ", $(esc(n)))) - end - end - path = dirname(@__FILE__) files = filter(x -> x[(end - 2):end] == ".jl", readdir(path)) for file in files diff --git a/src/OptimizationProblems.jl b/src/OptimizationProblems.jl index 7ea7b664e..feff54b3c 100644 --- a/src/OptimizationProblems.jl +++ b/src/OptimizationProblems.jl @@ -88,4 +88,20 @@ for name in cols_names, i = 1:number_of_problems meta[!, name][i] = eval(Meta.parse("$(split(files[i], ".")[1])_meta"))[name] end +_adjust_nvar_warn_message(problem_name, n_orig, n) = + string(problem_name, ": number of variables adjusted from ", n_orig, " to ", n) + +""" + @adjust_nvar_warn(problem_name, n_orig, n) + +Issue a warning if the number of variables was adjusted, showing both original and adjusted values. +""" +macro adjust_nvar_warn(problem_name, n_orig, n) + return quote + local _n_orig = $(esc(n_orig)) + local _n = $(esc(n)) + (_n == _n_orig) || @warn(_adjust_nvar_warn_message($(esc(problem_name)), _n_orig, _n)) + end +end + end # module diff --git a/src/PureJuMP/PureJuMP.jl b/src/PureJuMP/PureJuMP.jl index a52f52b39..ae7418db0 100644 --- a/src/PureJuMP/PureJuMP.jl +++ b/src/PureJuMP/PureJuMP.jl @@ -19,22 +19,7 @@ function _ensure_data!(key::Symbol, relpath::AbstractString) end using JuMP, LinearAlgebra, SpecialFunctions - -_adjust_nvar_warn_message(problem_name, n_orig, n) = - string(problem_name, ": number of variables adjusted from ", n_orig, " to ", n) - -""" - @adjust_nvar_warn(problem_name, n_orig, n) - -Issue a warning if the number of variables was adjusted, showing both original and adjusted values. 
-""" -macro adjust_nvar_warn(problem_name, n_orig, n) - return quote - local _n_orig = $(esc(n_orig)) - local _n = $(esc(n)) - (_n == _n_orig) || @warn(_adjust_nvar_warn_message($(esc(problem_name)), _n_orig, _n)) - end -end +import ..OptimizationProblems: @adjust_nvar_warn path = dirname(@__FILE__) files = filter(x -> x[(end - 2):end] == ".jl", readdir(path)) diff --git a/test/test-defined-problems.jl b/test/test-defined-problems.jl index 9da369935..5c4a5834a 100644 --- a/test/test-defined-problems.jl +++ b/test/test-defined-problems.jl @@ -22,11 +22,15 @@ probes = @sync begin end @info "PureJuMP missing per worker" probes -function _check_adjusted_warning(ctor, expected_msg::AbstractString, expected_nvar::Integer) +function _check_adjusted_warning(ctor::Function, expected_msg::AbstractString, expected_nvar::Integer) nlp = @test_logs (:warn, expected_msg) ctor() @test nlp.meta.nvar == expected_nvar end +function _check_adjusted_warning(expected_msg::AbstractString, expected_nvar::Integer, ctor::Function) + _check_adjusted_warning(ctor, expected_msg, expected_nvar) +end + @testset "Adjusted dimension warnings" begin _check_adjusted_warning("NZF1: number of variables adjusted from 1 to 26", 26) do ADNLPProblems.NZF1(n = 1) From b59e1ba171dc3a18b0372adf21e8757a4d68136a Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sun, 19 Apr 2026 01:26:59 +0530 Subject: [PATCH 11/31] adding copilot suggestions to fix dimension warnings in ADNLPProblems and PureJuMP --- src/ADNLPProblems/ADNLPProblems.jl | 2 +- src/OptimizationProblems.jl | 16 ++++++++++++++++ src/PureJuMP/PureJuMP.jl | 2 +- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/ADNLPProblems/ADNLPProblems.jl b/src/ADNLPProblems/ADNLPProblems.jl index 23c40a033..bd51438f6 100644 --- a/src/ADNLPProblems/ADNLPProblems.jl +++ b/src/ADNLPProblems/ADNLPProblems.jl @@ -1,7 +1,7 @@ module ADNLPProblems using Requires -import ..OptimizationProblems: @adjust_nvar_warn +import ..: @adjust_nvar_warn const 
default_nvar = 100 const data_path = joinpath(@__DIR__, "..", "..", "data") diff --git a/src/OptimizationProblems.jl b/src/OptimizationProblems.jl index feff54b3c..396eb584d 100644 --- a/src/OptimizationProblems.jl +++ b/src/OptimizationProblems.jl @@ -2,6 +2,22 @@ module OptimizationProblems using DataFrames +_adjust_nvar_warn_message(problem_name, n_orig, n) = + string(problem_name, ": number of variables adjusted from ", n_orig, " to ", n) + +""" + @adjust_nvar_warn(problem_name, n_orig, n) + +Issue a warning if the number of variables was adjusted, showing both original and adjusted values. +""" +macro adjust_nvar_warn(problem_name, n_orig, n) + return quote + local _n_orig = $(esc(n_orig)) + local _n = $(esc(n)) + (_n == _n_orig) || @warn(_adjust_nvar_warn_message($(esc(problem_name)), _n_orig, _n)) + end +end + include("ADNLPProblems/ADNLPProblems.jl") include("PureJuMP/PureJuMP.jl") diff --git a/src/PureJuMP/PureJuMP.jl b/src/PureJuMP/PureJuMP.jl index ae7418db0..a5089e735 100644 --- a/src/PureJuMP/PureJuMP.jl +++ b/src/PureJuMP/PureJuMP.jl @@ -19,7 +19,7 @@ function _ensure_data!(key::Symbol, relpath::AbstractString) end using JuMP, LinearAlgebra, SpecialFunctions -import ..OptimizationProblems: @adjust_nvar_warn +import ..: @adjust_nvar_warn path = dirname(@__FILE__) files = filter(x -> x[(end - 2):end] == ".jl", readdir(path)) From be0c0e191e5550247f6fec8c145f93b563b0d17b Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sun, 19 Apr 2026 01:28:17 +0530 Subject: [PATCH 12/31] Update OptimizationProblems.jl --- src/OptimizationProblems.jl | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/src/OptimizationProblems.jl b/src/OptimizationProblems.jl index 396eb584d..dfbcc4490 100644 --- a/src/OptimizationProblems.jl +++ b/src/OptimizationProblems.jl @@ -104,20 +104,4 @@ for name in cols_names, i = 1:number_of_problems meta[!, name][i] = eval(Meta.parse("$(split(files[i], ".")[1])_meta"))[name] end -_adjust_nvar_warn_message(problem_name, n_orig, 
n) = - string(problem_name, ": number of variables adjusted from ", n_orig, " to ", n) - -""" - @adjust_nvar_warn(problem_name, n_orig, n) - -Issue a warning if the number of variables was adjusted, showing both original and adjusted values. -""" -macro adjust_nvar_warn(problem_name, n_orig, n) - return quote - local _n_orig = $(esc(n_orig)) - local _n = $(esc(n)) - (_n == _n_orig) || @warn(_adjust_nvar_warn_message($(esc(problem_name)), _n_orig, _n)) - end -end - end # module From b5a7f23b9ae42773719bd51c3d61a0155da119df Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sun, 19 Apr 2026 02:06:34 +0530 Subject: [PATCH 13/31] final changes --- src/ADNLPProblems/ADNLPProblems.jl | 2 +- src/PureJuMP/PureJuMP.jl | 2 +- test/test-defined-problems.jl | 6 ++++++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/ADNLPProblems/ADNLPProblems.jl b/src/ADNLPProblems/ADNLPProblems.jl index bd51438f6..23c40a033 100644 --- a/src/ADNLPProblems/ADNLPProblems.jl +++ b/src/ADNLPProblems/ADNLPProblems.jl @@ -1,7 +1,7 @@ module ADNLPProblems using Requires -import ..: @adjust_nvar_warn +import ..OptimizationProblems: @adjust_nvar_warn const default_nvar = 100 const data_path = joinpath(@__DIR__, "..", "..", "data") diff --git a/src/PureJuMP/PureJuMP.jl b/src/PureJuMP/PureJuMP.jl index a5089e735..ae7418db0 100644 --- a/src/PureJuMP/PureJuMP.jl +++ b/src/PureJuMP/PureJuMP.jl @@ -19,7 +19,7 @@ function _ensure_data!(key::Symbol, relpath::AbstractString) end using JuMP, LinearAlgebra, SpecialFunctions -import ..: @adjust_nvar_warn +import ..OptimizationProblems: @adjust_nvar_warn path = dirname(@__FILE__) files = filter(x -> x[(end - 2):end] == ".jl", readdir(path)) diff --git a/test/test-defined-problems.jl b/test/test-defined-problems.jl index 5c4a5834a..77159a075 100644 --- a/test/test-defined-problems.jl +++ b/test/test-defined-problems.jl @@ -63,9 +63,15 @@ end _check_adjusted_warning("clplatea: number of variables adjusted from 5 to 4", 4) do 
MathOptNLPModel(PureJuMP.clplatea(n = 5)) end + _check_adjusted_warning("clplateb: number of variables adjusted from 5 to 9", 9) do + ADNLPProblems.clplateb(n = 5) + end _check_adjusted_warning("clplateb: number of variables adjusted from 5 to 4", 4) do MathOptNLPModel(PureJuMP.clplateb(n = 5)) end + _check_adjusted_warning("clplatec: number of variables adjusted from 5 to 9", 9) do + ADNLPProblems.clplatec(n = 5) + end _check_adjusted_warning("clplatec: number of variables adjusted from 5 to 4", 4) do MathOptNLPModel(PureJuMP.clplatec(n = 5)) end From 95b8843a7ee6cfed35c8cff67905bbcb261ee82f Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sun, 19 Apr 2026 02:10:53 +0530 Subject: [PATCH 14/31] Update src/OptimizationProblems.jl Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/OptimizationProblems.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/OptimizationProblems.jl b/src/OptimizationProblems.jl index dfbcc4490..e020283e7 100644 --- a/src/OptimizationProblems.jl +++ b/src/OptimizationProblems.jl @@ -11,10 +11,11 @@ _adjust_nvar_warn_message(problem_name, n_orig, n) = Issue a warning if the number of variables was adjusted, showing both original and adjusted values. 
""" macro adjust_nvar_warn(problem_name, n_orig, n) + helper = GlobalRef(@__MODULE__, :_adjust_nvar_warn_message) return quote local _n_orig = $(esc(n_orig)) local _n = $(esc(n)) - (_n == _n_orig) || @warn(_adjust_nvar_warn_message($(esc(problem_name)), _n_orig, _n)) + (_n == _n_orig) || @warn($helper($(esc(problem_name)), _n_orig, _n)) end end From 120031df9e05c753fecacb93845ef1a244a4a6b0 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Tue, 21 Apr 2026 21:23:21 +0530 Subject: [PATCH 15/31] addressing review comments --- src/ADNLPProblems/broydn7d.jl | 3 +- src/ADNLPProblems/dixmaan_efgh.jl | 3 +- src/ADNLPProblems/dixmaan_ijkl.jl | 3 +- src/ADNLPProblems/dixmaan_mnop.jl | 3 +- src/PureJuMP/spmsrtls.jl | 2 + src/PureJuMP/watson.jl | 2 + test/test-defined-problems.jl | 155 +++++++++--------------------- 7 files changed, 56 insertions(+), 115 deletions(-) diff --git a/src/ADNLPProblems/broydn7d.jl b/src/ADNLPProblems/broydn7d.jl index 0ca69b21d..d76b8fa11 100644 --- a/src/ADNLPProblems/broydn7d.jl +++ b/src/ADNLPProblems/broydn7d.jl @@ -1,9 +1,10 @@ export broydn7d function broydn7d(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - mod(n, 2) > 0 && @warn("broydn7d: number of variables adjusted to be even") + n_orig = n n2 = max(1, div(n, 2)) n = 2 * n2 + @adjust_nvar_warn("broydn7d", n_orig, n) function f(x; n = length(x), n2 = n2) p = 7 // 3 return abs(1 - 2 * x[2] + (3 - x[1] / 2) * x[1])^p + diff --git a/src/ADNLPProblems/dixmaan_efgh.jl b/src/ADNLPProblems/dixmaan_efgh.jl index 325a3c089..d4ae531e1 100644 --- a/src/ADNLPProblems/dixmaan_efgh.jl +++ b/src/ADNLPProblems/dixmaan_efgh.jl @@ -9,9 +9,10 @@ function dixmaane(; δ = 125 // 1000, kwargs..., ) where {T} - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaan", n_orig, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum(i // n * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/dixmaan_ijkl.jl b/src/ADNLPProblems/dixmaan_ijkl.jl index f1d86948b..cc464e4e4 100644 --- a/src/ADNLPProblems/dixmaan_ijkl.jl +++ b/src/ADNLPProblems/dixmaan_ijkl.jl @@ -9,9 +9,10 @@ function dixmaani(; δ = 125 // 1000, kwargs..., ) where {T} - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaan", n_orig, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum((i // n)^2 * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/dixmaan_mnop.jl b/src/ADNLPProblems/dixmaan_mnop.jl index e62e449e4..ba9e9559b 100644 --- a/src/ADNLPProblems/dixmaan_mnop.jl +++ b/src/ADNLPProblems/dixmaan_mnop.jl @@ -9,9 +9,10 @@ function dixmaanm(; δ = 125 // 1000, kwargs..., ) where {T} - (n % 3 == 0) || @warn("dixmaan: number of variables adjusted to be a multiple of 3") + n_orig = n m = max(1, div(n, 3)) n = 3 * m + @adjust_nvar_warn("dixmaan", n_orig, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum((i // n)^2 * α * x[i]^2 for i = 1:n) + diff --git a/src/PureJuMP/spmsrtls.jl 
b/src/PureJuMP/spmsrtls.jl index 98ae91cc2..4e08dd863 100644 --- a/src/PureJuMP/spmsrtls.jl +++ b/src/PureJuMP/spmsrtls.jl @@ -21,8 +21,10 @@ export spmsrtls function spmsrtls(args...; n::Int = default_nvar, kwargs...) + n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 + @adjust_nvar_warn("spmsrtls", n_orig, n) p = [sin(i^2) for i = 1:n] x0 = [p[i] / 5 for i = 1:n] diff --git a/src/PureJuMP/watson.jl b/src/PureJuMP/watson.jl index 6dc9def6e..e9f6a582c 100644 --- a/src/PureJuMP/watson.jl +++ b/src/PureJuMP/watson.jl @@ -17,7 +17,9 @@ export watson function watson(args...; n::Int = default_nvar, kwargs...) + n_orig = n n = min(max(n, 2), 31) + @adjust_nvar_warn("watson", n_orig, n) m = 31 nlp = Model() diff --git a/test/test-defined-problems.jl b/test/test-defined-problems.jl index 77159a075..acbb28e73 100644 --- a/test/test-defined-problems.jl +++ b/test/test-defined-problems.jl @@ -32,117 +32,50 @@ function _check_adjusted_warning(expected_msg::AbstractString, expected_nvar::In end @testset "Adjusted dimension warnings" begin - _check_adjusted_warning("NZF1: number of variables adjusted from 1 to 26", 26) do - ADNLPProblems.NZF1(n = 1) - end - _check_adjusted_warning("NZF1: number of variables adjusted from 1 to 26", 26) do - MathOptNLPModel(PureJuMP.NZF1(n = 1)) - end - - _check_adjusted_warning("spmsrtls: number of variables adjusted from 99 to 100", 100) do - ADNLPProblems.spmsrtls(n = 99) - end - - _check_adjusted_warning("chainwoo: number of variables adjusted from 1 to 4", 4) do - ADNLPProblems.chainwoo(n = 1) - end - _check_adjusted_warning("chainwoo: number of variables adjusted from 1 to 4", 4) do - MathOptNLPModel(PureJuMP.chainwoo(n = 1)) - end - - _check_adjusted_warning("catenary: number of variables adjusted from 10 to 9", 9) do - ADNLPProblems.catenary(n = 10) - end - _check_adjusted_warning("catenary: number of variables adjusted from 10 to 9", 9) do - MathOptNLPModel(PureJuMP.catenary(n = 10)) - end - - 
_check_adjusted_warning("clplatea: number of variables adjusted from 5 to 9", 9) do - ADNLPProblems.clplatea(n = 5) - end - _check_adjusted_warning("clplatea: number of variables adjusted from 5 to 4", 4) do - MathOptNLPModel(PureJuMP.clplatea(n = 5)) - end - _check_adjusted_warning("clplateb: number of variables adjusted from 5 to 9", 9) do - ADNLPProblems.clplateb(n = 5) - end - _check_adjusted_warning("clplateb: number of variables adjusted from 5 to 4", 4) do - MathOptNLPModel(PureJuMP.clplateb(n = 5)) - end - _check_adjusted_warning("clplatec: number of variables adjusted from 5 to 9", 9) do - ADNLPProblems.clplatec(n = 5) - end - _check_adjusted_warning("clplatec: number of variables adjusted from 5 to 4", 4) do - MathOptNLPModel(PureJuMP.clplatec(n = 5)) - end - - _check_adjusted_warning("fminsrf2: number of variables adjusted from 1 to 4", 4) do - ADNLPProblems.fminsrf2(n = 1) - end - _check_adjusted_warning("fminsrf2: number of variables adjusted from 1 to 4", 4) do - MathOptNLPModel(PureJuMP.fminsrf2(n = 1)) - end - - _check_adjusted_warning("powellsg: number of variables adjusted from 1 to 4", 4) do - ADNLPProblems.powellsg(n = 1) - end - _check_adjusted_warning("powellsg: number of variables adjusted from 1 to 4", 4) do - ADNLPProblems.powellsg(use_nls = true, n = 1) - end - _check_adjusted_warning("powellsg: number of variables adjusted from 1 to 4", 4) do - MathOptNLPModel(PureJuMP.powellsg(n = 1)) - end - _check_adjusted_warning("powellsg: number of variables adjusted from 1 to 4", 4) do - MathOptNLPModel(PureJuMP.powellsg(use_nls = true, n = 1)) - end - - _check_adjusted_warning("srosenbr: number of variables adjusted from 1 to 2", 2) do - ADNLPProblems.srosenbr(n = 1) - end - _check_adjusted_warning("srosenbr: number of variables adjusted from 1 to 2", 2) do - MathOptNLPModel(PureJuMP.srosenbr(n = 1)) - end - - _check_adjusted_warning("watson: number of variables adjusted from 1 to 2", 2) do - ADNLPProblems.watson(n = 1) - end - 
_check_adjusted_warning("watson: number of variables adjusted from 1 to 2", 2) do - ADNLPProblems.watson(use_nls = true, n = 1) - end - - _check_adjusted_warning("woods: number of variables adjusted from 1 to 4", 4) do - ADNLPProblems.woods(n = 1) - end - _check_adjusted_warning("woods: number of variables adjusted from 1 to 4", 4) do - MathOptNLPModel(PureJuMP.woods(n = 1)) - end - - _check_adjusted_warning("bearing: number of variables adjusted from 1 to 9", 9) do - ADNLPProblems.bearing(n = 1) - end - _check_adjusted_warning("bearing: number of variables adjusted from 1 to 9", 9) do - MathOptNLPModel(PureJuMP.bearing(n = 1)) - end - - _check_adjusted_warning("broydn7d: number of variables adjusted from 1 to 2", 2) do - MathOptNLPModel(PureJuMP.broydn7d(n = 1)) - end - - _check_adjusted_warning("dixmaan: number of variables adjusted from 1 to 3", 3) do - MathOptNLPModel(PureJuMP.dixmaane(n = 1)) - end - _check_adjusted_warning("dixmaan: number of variables adjusted from 1 to 3", 3) do - MathOptNLPModel(PureJuMP.dixmaani(n = 1)) - end - _check_adjusted_warning("dixmaan: number of variables adjusted from 1 to 3", 3) do - MathOptNLPModel(PureJuMP.dixmaanm(n = 1)) - end - - _check_adjusted_warning("spmsrtls: number of variables adjusted from 99 to 100", 100) do - ADNLPProblems.spmsrtls(use_nls = true, n = 99) - end - _check_adjusted_warning("NZF1: number of variables adjusted from 1 to 26", 26) do - ADNLPProblems.NZF1(use_nls = true, n = 1) + warning_cases = [ + (; msg = "NZF1: number of variables adjusted from 1 to 26", nvar = 26, ctor = () -> ADNLPProblems.NZF1(n = 1)), + (; msg = "NZF1: number of variables adjusted from 1 to 26", nvar = 26, ctor = () -> MathOptNLPModel(PureJuMP.NZF1(n = 1))), + (; msg = "spmsrtls: number of variables adjusted from 99 to 100", nvar = 100, ctor = () -> ADNLPProblems.spmsrtls(n = 99)), + (; msg = "spmsrtls: number of variables adjusted from 99 to 100", nvar = 100, ctor = () -> MathOptNLPModel(PureJuMP.spmsrtls(n = 99))), + (; msg = 
"chainwoo: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> ADNLPProblems.chainwoo(n = 1)), + (; msg = "chainwoo: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.chainwoo(n = 1))), + (; msg = "catenary: number of variables adjusted from 10 to 9", nvar = 9, ctor = () -> ADNLPProblems.catenary(n = 10)), + (; msg = "catenary: number of variables adjusted from 10 to 9", nvar = 9, ctor = () -> MathOptNLPModel(PureJuMP.catenary(n = 10))), + (; msg = "clplatea: number of variables adjusted from 5 to 9", nvar = 9, ctor = () -> ADNLPProblems.clplatea(n = 5)), + (; msg = "clplatea: number of variables adjusted from 5 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.clplatea(n = 5))), + (; msg = "clplateb: number of variables adjusted from 5 to 9", nvar = 9, ctor = () -> ADNLPProblems.clplateb(n = 5)), + (; msg = "clplateb: number of variables adjusted from 5 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.clplateb(n = 5))), + (; msg = "clplatec: number of variables adjusted from 5 to 9", nvar = 9, ctor = () -> ADNLPProblems.clplatec(n = 5)), + (; msg = "clplatec: number of variables adjusted from 5 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.clplatec(n = 5))), + (; msg = "fminsrf2: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> ADNLPProblems.fminsrf2(n = 1)), + (; msg = "fminsrf2: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.fminsrf2(n = 1))), + (; msg = "powellsg: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> ADNLPProblems.powellsg(n = 1)), + (; msg = "powellsg: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> ADNLPProblems.powellsg(use_nls = true, n = 1)), + (; msg = "powellsg: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.powellsg(n = 1))), + (; msg = "powellsg: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> 
MathOptNLPModel(PureJuMP.powellsg(use_nls = true, n = 1))), + (; msg = "srosenbr: number of variables adjusted from 1 to 2", nvar = 2, ctor = () -> ADNLPProblems.srosenbr(n = 1)), + (; msg = "srosenbr: number of variables adjusted from 1 to 2", nvar = 2, ctor = () -> MathOptNLPModel(PureJuMP.srosenbr(n = 1))), + (; msg = "watson: number of variables adjusted from 1 to 2", nvar = 2, ctor = () -> ADNLPProblems.watson(n = 1)), + (; msg = "watson: number of variables adjusted from 1 to 2", nvar = 2, ctor = () -> ADNLPProblems.watson(use_nls = true, n = 1)), + (; msg = "watson: number of variables adjusted from 1 to 2", nvar = 2, ctor = () -> MathOptNLPModel(PureJuMP.watson(n = 1))), + (; msg = "woods: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> ADNLPProblems.woods(n = 1)), + (; msg = "woods: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.woods(n = 1))), + (; msg = "bearing: number of variables adjusted from 1 to 9", nvar = 9, ctor = () -> ADNLPProblems.bearing(n = 1)), + (; msg = "bearing: number of variables adjusted from 1 to 9", nvar = 9, ctor = () -> MathOptNLPModel(PureJuMP.bearing(n = 1))), + (; msg = "broydn7d: number of variables adjusted from 5 to 4", nvar = 4, ctor = () -> ADNLPProblems.broydn7d(n = 5)), + (; msg = "broydn7d: number of variables adjusted from 5 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.broydn7d(n = 5))), + (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> ADNLPProblems.dixmaane(n = 1)), + (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> MathOptNLPModel(PureJuMP.dixmaane(n = 1))), + (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> ADNLPProblems.dixmaani(n = 1)), + (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> MathOptNLPModel(PureJuMP.dixmaani(n = 1))), + (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 
3, ctor = () -> ADNLPProblems.dixmaanm(n = 1)), + (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> MathOptNLPModel(PureJuMP.dixmaanm(n = 1))), + (; msg = "spmsrtls: number of variables adjusted from 99 to 100", nvar = 100, ctor = () -> ADNLPProblems.spmsrtls(use_nls = true, n = 99)), + (; msg = "NZF1: number of variables adjusted from 1 to 26", nvar = 26, ctor = () -> ADNLPProblems.NZF1(use_nls = true, n = 1)), + ] + + for case in warning_cases + _check_adjusted_warning(case.ctor, case.msg, case.nvar) end end From 270308f19b82ae7c2d822850d6854e848e12a229 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sun, 26 Apr 2026 02:13:32 +0530 Subject: [PATCH 16/31] addressing review comments --- src/OptimizationProblems.jl | 6 +- test/test-defined-problems.jl | 108 ++++++++++++++++++---------------- 2 files changed, 57 insertions(+), 57 deletions(-) diff --git a/src/OptimizationProblems.jl b/src/OptimizationProblems.jl index e020283e7..4479f6f69 100644 --- a/src/OptimizationProblems.jl +++ b/src/OptimizationProblems.jl @@ -2,20 +2,16 @@ module OptimizationProblems using DataFrames -_adjust_nvar_warn_message(problem_name, n_orig, n) = - string(problem_name, ": number of variables adjusted from ", n_orig, " to ", n) - """ @adjust_nvar_warn(problem_name, n_orig, n) Issue a warning if the number of variables was adjusted, showing both original and adjusted values. 
""" macro adjust_nvar_warn(problem_name, n_orig, n) - helper = GlobalRef(@__MODULE__, :_adjust_nvar_warn_message) return quote local _n_orig = $(esc(n_orig)) local _n = $(esc(n)) - (_n == _n_orig) || @warn($helper($(esc(problem_name)), _n_orig, _n)) + (_n == _n_orig) || @warn(string($(esc(problem_name)), ": number of variables adjusted from ", _n_orig, " to ", _n)) end end diff --git a/test/test-defined-problems.jl b/test/test-defined-problems.jl index acbb28e73..fc2a0b85f 100644 --- a/test/test-defined-problems.jl +++ b/test/test-defined-problems.jl @@ -3,17 +3,17 @@ for s in syms if !isdefined(mod, s) push!(missing, s) - end end +end return (pid = myid(), missing = missing) end probes = @sync begin for pid in workers() @async remotecall_fetch(probe_missing, pid, ADNLPProblems, list_problems_ADNLPProblems) - end -end -@info "ADNLPProblems missing per worker" probes + end + end + @info "ADNLPProblems missing per worker" probes probes = @sync begin for pid in workers() @@ -22,60 +22,64 @@ probes = @sync begin end @info "PureJuMP missing per worker" probes -function _check_adjusted_warning(ctor::Function, expected_msg::AbstractString, expected_nvar::Integer) - nlp = @test_logs (:warn, expected_msg) ctor() - @test nlp.meta.nvar == expected_nvar +function _warning_problems() + probs = Set{Symbol}() + src_root = joinpath(@__DIR__, "..", "src") + + for subdir in ("ADNLPProblems", "PureJuMP") + for file in readdir(joinpath(src_root, subdir)) + endswith(file, ".jl") || continue + source = read(joinpath(src_root, subdir, file), String) + occursin("@adjust_nvar_warn", source) || continue + + stem = Symbol(first(splitext(file))) + if stem in list_problems + push!(probs, stem) + elseif startswith(String(stem), "dixmaan_") + union!(probs, filter(prob -> startswith(String(prob), "dixmaan"), list_problems)) + end + end + end + + return sort!(collect(probs)) end -function _check_adjusted_warning(expected_msg::AbstractString, expected_nvar::Integer, ctor::Function) - 
_check_adjusted_warning(ctor, expected_msg, expected_nvar) +function _check_adjusted_warning(prob::Symbol, backend::Symbol) + make_model(n) = let + mod = backend === :ad ? ADNLPProblems : + backend === :jump ? PureJuMP : + error("Unknown backend $(backend) for $(prob)") + model = getfield(mod, prob)(; n = n) + backend === :jump ? MathOptNLPModel(model) : model + end + + for n in (1, 2, 3, 4, 5, 9, 10, 26, 99, 100) + nlp_probe = try + make_model(n) + catch + continue + end + + n_adj = nlp_probe.meta.nvar + + n_adj == n && continue + + msg_re = Regex("number of variables adjusted from $(n) to $(n_adj)") + @test_logs (:warn, msg_re) make_model(n) + @test_nowarn make_model(n_adj) + return + end + + @test false end @testset "Adjusted dimension warnings" begin - warning_cases = [ - (; msg = "NZF1: number of variables adjusted from 1 to 26", nvar = 26, ctor = () -> ADNLPProblems.NZF1(n = 1)), - (; msg = "NZF1: number of variables adjusted from 1 to 26", nvar = 26, ctor = () -> MathOptNLPModel(PureJuMP.NZF1(n = 1))), - (; msg = "spmsrtls: number of variables adjusted from 99 to 100", nvar = 100, ctor = () -> ADNLPProblems.spmsrtls(n = 99)), - (; msg = "spmsrtls: number of variables adjusted from 99 to 100", nvar = 100, ctor = () -> MathOptNLPModel(PureJuMP.spmsrtls(n = 99))), - (; msg = "chainwoo: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> ADNLPProblems.chainwoo(n = 1)), - (; msg = "chainwoo: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.chainwoo(n = 1))), - (; msg = "catenary: number of variables adjusted from 10 to 9", nvar = 9, ctor = () -> ADNLPProblems.catenary(n = 10)), - (; msg = "catenary: number of variables adjusted from 10 to 9", nvar = 9, ctor = () -> MathOptNLPModel(PureJuMP.catenary(n = 10))), - (; msg = "clplatea: number of variables adjusted from 5 to 9", nvar = 9, ctor = () -> ADNLPProblems.clplatea(n = 5)), - (; msg = "clplatea: number of variables adjusted from 5 to 4", nvar = 4, ctor = 
() -> MathOptNLPModel(PureJuMP.clplatea(n = 5))), - (; msg = "clplateb: number of variables adjusted from 5 to 9", nvar = 9, ctor = () -> ADNLPProblems.clplateb(n = 5)), - (; msg = "clplateb: number of variables adjusted from 5 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.clplateb(n = 5))), - (; msg = "clplatec: number of variables adjusted from 5 to 9", nvar = 9, ctor = () -> ADNLPProblems.clplatec(n = 5)), - (; msg = "clplatec: number of variables adjusted from 5 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.clplatec(n = 5))), - (; msg = "fminsrf2: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> ADNLPProblems.fminsrf2(n = 1)), - (; msg = "fminsrf2: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.fminsrf2(n = 1))), - (; msg = "powellsg: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> ADNLPProblems.powellsg(n = 1)), - (; msg = "powellsg: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> ADNLPProblems.powellsg(use_nls = true, n = 1)), - (; msg = "powellsg: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.powellsg(n = 1))), - (; msg = "powellsg: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.powellsg(use_nls = true, n = 1))), - (; msg = "srosenbr: number of variables adjusted from 1 to 2", nvar = 2, ctor = () -> ADNLPProblems.srosenbr(n = 1)), - (; msg = "srosenbr: number of variables adjusted from 1 to 2", nvar = 2, ctor = () -> MathOptNLPModel(PureJuMP.srosenbr(n = 1))), - (; msg = "watson: number of variables adjusted from 1 to 2", nvar = 2, ctor = () -> ADNLPProblems.watson(n = 1)), - (; msg = "watson: number of variables adjusted from 1 to 2", nvar = 2, ctor = () -> ADNLPProblems.watson(use_nls = true, n = 1)), - (; msg = "watson: number of variables adjusted from 1 to 2", nvar = 2, ctor = () -> MathOptNLPModel(PureJuMP.watson(n = 1))), - (; msg = "woods: number of 
variables adjusted from 1 to 4", nvar = 4, ctor = () -> ADNLPProblems.woods(n = 1)), - (; msg = "woods: number of variables adjusted from 1 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.woods(n = 1))), - (; msg = "bearing: number of variables adjusted from 1 to 9", nvar = 9, ctor = () -> ADNLPProblems.bearing(n = 1)), - (; msg = "bearing: number of variables adjusted from 1 to 9", nvar = 9, ctor = () -> MathOptNLPModel(PureJuMP.bearing(n = 1))), - (; msg = "broydn7d: number of variables adjusted from 5 to 4", nvar = 4, ctor = () -> ADNLPProblems.broydn7d(n = 5)), - (; msg = "broydn7d: number of variables adjusted from 5 to 4", nvar = 4, ctor = () -> MathOptNLPModel(PureJuMP.broydn7d(n = 5))), - (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> ADNLPProblems.dixmaane(n = 1)), - (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> MathOptNLPModel(PureJuMP.dixmaane(n = 1))), - (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> ADNLPProblems.dixmaani(n = 1)), - (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> MathOptNLPModel(PureJuMP.dixmaani(n = 1))), - (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> ADNLPProblems.dixmaanm(n = 1)), - (; msg = "dixmaan: number of variables adjusted from 1 to 3", nvar = 3, ctor = () -> MathOptNLPModel(PureJuMP.dixmaanm(n = 1))), - (; msg = "spmsrtls: number of variables adjusted from 99 to 100", nvar = 100, ctor = () -> ADNLPProblems.spmsrtls(use_nls = true, n = 99)), - (; msg = "NZF1: number of variables adjusted from 1 to 26", nvar = 26, ctor = () -> ADNLPProblems.NZF1(use_nls = true, n = 1)), - ] + probs = _warning_problems() + @test !isempty(probs) - for case in warning_cases - _check_adjusted_warning(case.ctor, case.msg, case.nvar) + for prob in probs + isdefined(ADNLPProblems, prob) && _check_adjusted_warning(prob, :ad) + isdefined(PureJuMP, prob) && 
_check_adjusted_warning(prob, :jump) end end From abd2a674d84a8ee199fc97fe6871aaeda95f838a Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sun, 26 Apr 2026 02:47:21 +0530 Subject: [PATCH 17/31] format --- src/OptimizationProblems.jl | 4 +++- test/test-defined-problems.jl | 23 ++++++++++++----------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/OptimizationProblems.jl b/src/OptimizationProblems.jl index 4479f6f69..1d5a5540d 100644 --- a/src/OptimizationProblems.jl +++ b/src/OptimizationProblems.jl @@ -11,7 +11,9 @@ macro adjust_nvar_warn(problem_name, n_orig, n) return quote local _n_orig = $(esc(n_orig)) local _n = $(esc(n)) - (_n == _n_orig) || @warn(string($(esc(problem_name)), ": number of variables adjusted from ", _n_orig, " to ", _n)) + (_n == _n_orig) || @warn( + string($(esc(problem_name)), ": number of variables adjusted from ", _n_orig, " to ", _n) + ) end end diff --git a/test/test-defined-problems.jl b/test/test-defined-problems.jl index fc2a0b85f..b83f5cc40 100644 --- a/test/test-defined-problems.jl +++ b/test/test-defined-problems.jl @@ -3,17 +3,17 @@ for s in syms if !isdefined(mod, s) push!(missing, s) + end end -end return (pid = myid(), missing = missing) end probes = @sync begin for pid in workers() @async remotecall_fetch(probe_missing, pid, ADNLPProblems, list_problems_ADNLPProblems) - end - end - @info "ADNLPProblems missing per worker" probes + end +end +@info "ADNLPProblems missing per worker" probes probes = @sync begin for pid in workers() @@ -45,13 +45,14 @@ function _warning_problems() end function _check_adjusted_warning(prob::Symbol, backend::Symbol) - make_model(n) = let - mod = backend === :ad ? ADNLPProblems : - backend === :jump ? PureJuMP : - error("Unknown backend $(backend) for $(prob)") - model = getfield(mod, prob)(; n = n) - backend === :jump ? MathOptNLPModel(model) : model - end + make_model(n) = + let + mod = + backend === :ad ? ADNLPProblems : + backend === :jump ? 
PureJuMP : error("Unknown backend $(backend) for $(prob)") + model = getfield(mod, prob)(; n = n) + backend === :jump ? MathOptNLPModel(model) : model + end for n in (1, 2, 3, 4, 5, 9, 10, 26, 99, 100) nlp_probe = try From 1828bf5dd790475bfcb6ae16ce607962e7c2ec43 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Thu, 30 Apr 2026 13:55:33 +0530 Subject: [PATCH 18/31] addressing review comments --- test/test-defined-problems.jl | 87 +++++++++++++++++------------------ 1 file changed, 42 insertions(+), 45 deletions(-) diff --git a/test/test-defined-problems.jl b/test/test-defined-problems.jl index b83f5cc40..175cbd29f 100644 --- a/test/test-defined-problems.jl +++ b/test/test-defined-problems.jl @@ -22,65 +22,62 @@ probes = @sync begin end @info "PureJuMP missing per worker" probes -function _warning_problems() - probs = Set{Symbol}() - src_root = joinpath(@__DIR__, "..", "src") +@testset "Adjusted dimension warnings" begin + # Get all scalable problems from the metadata registry + var_probs = OptimizationProblems.meta[OptimizationProblems.meta.variable_nvar, :name] + @test !isempty(var_probs) + # Filter to only problems that actually use the @adjust_nvar_warn macro + src_root = joinpath(@__DIR__, "..", "src") + probs_with_macro = Set{String}() for subdir in ("ADNLPProblems", "PureJuMP") for file in readdir(joinpath(src_root, subdir)) endswith(file, ".jl") || continue source = read(joinpath(src_root, subdir, file), String) - occursin("@adjust_nvar_warn", source) || continue - - stem = Symbol(first(splitext(file))) - if stem in list_problems - push!(probs, stem) - elseif startswith(String(stem), "dixmaan_") - union!(probs, filter(prob -> startswith(String(prob), "dixmaan"), list_problems)) + if occursin("@adjust_nvar_warn", source) + stem = first(splitext(file)) + push!(probs_with_macro, stem) end end end - return sort!(collect(probs)) -end + # Test each problem that uses the macro + for prob_name in sort(collect(probs_with_macro)) + prob_sym = Symbol(prob_name) + + # 
Check if problem is actually in the registry and scalable + prob_name in var_probs || continue + + get_nvar_func = getfield(OptimizationProblems, Symbol("get_", prob_name, "_nvar")) -function _check_adjusted_warning(prob::Symbol, backend::Symbol) - make_model(n) = - let - mod = - backend === :ad ? ADNLPProblems : - backend === :jump ? PureJuMP : error("Unknown backend $(backend) for $(prob)") - model = getfield(mod, prob)(; n = n) - backend === :jump ? MathOptNLPModel(model) : model - end + # Try standard test dimensions + for n in (50, 100, 150) + n_adjusted = get_nvar_func(; n = n) + n_adjusted == n && continue # Skip if no adjustment for this n - for n in (1, 2, 3, 4, 5, 9, 10, 26, 99, 100) - nlp_probe = try - make_model(n) - catch - continue - end + # Found an adjustment - test it + msg_re = Regex("number of variables adjusted from $(n) to $(n_adjusted)") - n_adj = nlp_probe.meta.nvar + for mod in (ADNLPProblems, PureJuMP) + isdefined(mod, prob_sym) || continue - n_adj == n && continue + constructor = getfield(mod, prob_sym) + + try + # Try to verify the model can be constructed with adjusted size + _ = constructor(; n = n_adjusted) + catch + continue # Skip if construction fails + end - msg_re = Regex("number of variables adjusted from $(n) to $(n_adj)") - @test_logs (:warn, msg_re) make_model(n) - @test_nowarn make_model(n_adj) - return - end - - @test false -end - -@testset "Adjusted dimension warnings" begin - probs = _warning_problems() - @test !isempty(probs) - - for prob in probs - isdefined(ADNLPProblems, prob) && _check_adjusted_warning(prob, :ad) - isdefined(PureJuMP, prob) && _check_adjusted_warning(prob, :jump) + # Test that warning is emitted + @test_logs (:warn, msg_re) constructor(; n = n) + # Test that no warning when using adjusted size + @test_nowarn constructor(; n = n_adjusted) + end + + break # Move to next problem after testing one adjustment + end end end From 25b360a62551727669a1c584c63c5534c14a5866 Mon Sep 17 00:00:00 2001 From: 
arnavk23 Date: Mon, 4 May 2026 14:44:44 +0530 Subject: [PATCH 19/31] further macro additions --- src/ADNLPProblems/chain.jl | 3 +++ src/ADNLPProblems/channel.jl | 3 +++ src/ADNLPProblems/clnlbeam.jl | 3 +++ src/ADNLPProblems/dixmaan_efgh.jl | 2 +- src/ADNLPProblems/dixmaan_ijkl.jl | 2 +- src/ADNLPProblems/dixmaan_mnop.jl | 2 +- src/ADNLPProblems/elec.jl | 5 ++++- src/ADNLPProblems/hovercraft1d.jl | 10 ++++++++-- src/ADNLPProblems/robotarm.jl | 5 ++++- src/ADNLPProblems/structural.jl | 6 +++++- src/PureJuMP/catmix.jl | 3 +++ src/PureJuMP/chain.jl | 3 +++ src/PureJuMP/channel.jl | 3 +++ src/PureJuMP/clnlbeam.jl | 3 +++ src/PureJuMP/dixmaan_efgh.jl | 2 +- src/PureJuMP/dixmaan_ijkl.jl | 2 +- src/PureJuMP/dixmaan_mnop.jl | 2 +- src/PureJuMP/elec.jl | 5 ++++- src/PureJuMP/gasoil.jl | 3 +++ src/PureJuMP/glider.jl | 3 +++ src/PureJuMP/hovercraft1d.jl | 5 ++++- src/PureJuMP/marine.jl | 5 ++++- src/PureJuMP/methanol.jl | 3 +++ src/PureJuMP/minsurf.jl | 5 ++++- src/PureJuMP/pinene.jl | 3 +++ src/PureJuMP/robotarm.jl | 5 ++++- src/PureJuMP/rocket.jl | 3 +++ src/PureJuMP/steering.jl | 3 +++ src/PureJuMP/structural.jl | 6 +++++- src/PureJuMP/torsion.jl | 5 ++++- 30 files changed, 95 insertions(+), 18 deletions(-) diff --git a/src/ADNLPProblems/chain.jl b/src/ADNLPProblems/chain.jl index f92a59202..5d0db8cb1 100644 --- a/src/ADNLPProblems/chain.jl +++ b/src/ADNLPProblems/chain.jl @@ -1,7 +1,10 @@ export chain function chain(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n nh = max(2, div(n - 4, 4)) + n = 4 * nh + 4 + @adjust_nvar_warn("chain", n_orig, n) L = 4 a = 1 diff --git a/src/ADNLPProblems/channel.jl b/src/ADNLPProblems/channel.jl index 154f6ebd5..718d0bf7f 100644 --- a/src/ADNLPProblems/channel.jl +++ b/src/ADNLPProblems/channel.jl @@ -1,7 +1,10 @@ export channel function channel(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} + n_orig = n nh = max(2, div(n, 8)) + n = 8 * nh + @adjust_nvar_warn("channel", n_orig, n) nc = 4 nd = 4 diff --git a/src/ADNLPProblems/clnlbeam.jl b/src/ADNLPProblems/clnlbeam.jl index fc46d8e97..bf25a8b68 100644 --- a/src/ADNLPProblems/clnlbeam.jl +++ b/src/ADNLPProblems/clnlbeam.jl @@ -1,7 +1,10 @@ export clnlbeam function clnlbeam(args...; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n N = div(n - 3, 3) + n = 3 * N + 3 + @adjust_nvar_warn("clnlbeam", n_orig, n) h = 1 // N alpha = 350 function f(y; N = N, h = h, alpha = alpha) diff --git a/src/ADNLPProblems/dixmaan_efgh.jl b/src/ADNLPProblems/dixmaan_efgh.jl index d4ae531e1..b38fb16e9 100644 --- a/src/ADNLPProblems/dixmaan_efgh.jl +++ b/src/ADNLPProblems/dixmaan_efgh.jl @@ -12,7 +12,7 @@ function dixmaane(; n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaan", n_orig, n) + @adjust_nvar_warn("dixmaane", n_orig, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum(i // n * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/dixmaan_ijkl.jl b/src/ADNLPProblems/dixmaan_ijkl.jl index cc464e4e4..d0b5d8476 100644 --- a/src/ADNLPProblems/dixmaan_ijkl.jl +++ b/src/ADNLPProblems/dixmaan_ijkl.jl @@ -12,7 +12,7 @@ function dixmaani(; n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaan", n_orig, n) + @adjust_nvar_warn("dixmaani", n_orig, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum((i // n)^2 * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/dixmaan_mnop.jl b/src/ADNLPProblems/dixmaan_mnop.jl index ba9e9559b..627353c22 100644 --- a/src/ADNLPProblems/dixmaan_mnop.jl +++ b/src/ADNLPProblems/dixmaan_mnop.jl @@ -12,7 +12,7 @@ function dixmaanm(; n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaan", n_orig, n) + @adjust_nvar_warn("dixmaanm", n_orig, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum((i // n)^2 * α * x[i]^2 for i = 1:n) + 
diff --git a/src/ADNLPProblems/elec.jl b/src/ADNLPProblems/elec.jl index 058d7f771..d319fd5c1 100644 --- a/src/ADNLPProblems/elec.jl +++ b/src/ADNLPProblems/elec.jl @@ -1,7 +1,10 @@ export elec function elec(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n = max(2, div(n, 3)) + n_orig = n + m = max(2, div(n_orig, 3)) + n = 3 * m + @adjust_nvar_warn("elec", n_orig, n) # Define the objective function to minimize function f(x; n = n) return sum( diff --git a/src/ADNLPProblems/hovercraft1d.jl b/src/ADNLPProblems/hovercraft1d.jl index e2e1f30c4..2dfab9bb9 100644 --- a/src/ADNLPProblems/hovercraft1d.jl +++ b/src/ADNLPProblems/hovercraft1d.jl @@ -11,7 +11,10 @@ function hovercraft1d( type::Type{T} = Float64, kwargs..., ) where {T} - N = div(n, 3) + n_orig = n + N = div(n_orig, 3) + n = 3 * N - 1 + @adjust_nvar_warn("hovercraft1d", n_orig, n) function f(y; N = N) @views x, v, u = y[1:N], y[(N + 1):(2 * N)], y[(2 * N + 1):end] return 1 // 2 * sum(u .^ 2) @@ -72,7 +75,10 @@ function hovercraft1d( type::Type{T} = Float64, kwargs..., ) where {T} - N = div(n, 3) + n_orig = n + N = div(n_orig, 3) + n = 3 * N - 1 + @adjust_nvar_warn("hovercraft1d", n_orig, n) function F!(r, y; N = N) @views x, v, u = y[1:N], y[(N + 1):(2 * N)], y[(2 * N + 1):end] r .= u diff --git a/src/ADNLPProblems/robotarm.jl b/src/ADNLPProblems/robotarm.jl index a1264eebd..e1f0d23db 100644 --- a/src/ADNLPProblems/robotarm.jl +++ b/src/ADNLPProblems/robotarm.jl @@ -10,8 +10,11 @@ export robotarm # classification OOR2-AN-V-V function robotarm(; n::Int = default_nvar, L = 4.5, type::Type{T} = Float64, kwargs...) where {T} - N = max(2, div(n, 9)) + n_orig = n + N = max(2, div(n_orig, 9)) n = N + 1 + nvars = 9 * n + 1 + @adjust_nvar_warn("robotarm", n_orig, nvars) L = T(L) # x : vector of variables, of the form : [ρ(t=t1); ρ(t=t2); ... ρ(t=tf), θ(t=t1), ..., then ρ_dot, ..., then ρ_acc, .. 
ϕ_acc, tf] diff --git a/src/ADNLPProblems/structural.jl b/src/ADNLPProblems/structural.jl index 0793f199b..f39948fdf 100644 --- a/src/ADNLPProblems/structural.jl +++ b/src/ADNLPProblems/structural.jl @@ -1,7 +1,8 @@ export structural function structural(args...; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n = max(n, 100) + n_orig = n + n = max(n_orig, 100) sub2ind(shape, a, b) = LinearIndices(shape)[CartesianIndex.(a, b)] Nx = min(Int(round(n^(1 / 3))), 6) @@ -23,6 +24,9 @@ function structural(args...; n::Int = default_nvar, type::Type{T} = Float64, kwa M = Int(N * (N - 1) / 2) # number of edges + nvars = 2 * M + @adjust_nvar_warn("structural", n_orig, nvars) + # EDGES: columns are the indices of the nodes at either end edges = Array{Int}(zeros(M, 2)) diff --git a/src/PureJuMP/catmix.jl b/src/PureJuMP/catmix.jl index 580f2db7c..bdc68fefc 100644 --- a/src/PureJuMP/catmix.jl +++ b/src/PureJuMP/catmix.jl @@ -6,8 +6,11 @@ export catmix function catmix(args...; n::Int = default_nvar, kwargs...) + n_orig = n ne = 2 nc = 3 + n = 23 * n_orig + 2 + @adjust_nvar_warn("catmix", n_orig, n) tf = 1 h = tf / n # Final time diff --git a/src/PureJuMP/chain.jl b/src/PureJuMP/chain.jl index befb5f6cd..dddc3128d 100644 --- a/src/PureJuMP/chain.jl +++ b/src/PureJuMP/chain.jl @@ -13,7 +13,10 @@ export chain function chain(args...; n::Int = default_nvar, kwargs...) + n_orig = n nh = max(2, div(n - 4, 4)) + n = 4 * nh + 4 + @adjust_nvar_warn("chain", n_orig, n) L = 4 a = 1 diff --git a/src/PureJuMP/channel.jl b/src/PureJuMP/channel.jl index cce634660..8054a121d 100644 --- a/src/PureJuMP/channel.jl +++ b/src/PureJuMP/channel.jl @@ -12,7 +12,10 @@ export channel function channel(args...; n::Int = default_nvar, kwargs...) 
+ n_orig = n nh = max(2, div(n, 8)) + n = 8 * nh + @adjust_nvar_warn("channel", n_orig, n) nc = 4 nd = 4 diff --git a/src/PureJuMP/clnlbeam.jl b/src/PureJuMP/clnlbeam.jl index 3849c7fe8..6cafb82cb 100644 --- a/src/PureJuMP/clnlbeam.jl +++ b/src/PureJuMP/clnlbeam.jl @@ -14,7 +14,10 @@ export clnlbeam "The clnlbeam problem in size `n`" function clnlbeam(args...; n::Int = default_nvar, kwargs...) + n_orig = n N = div(n - 3, 3) + n = 3 * N + 3 + @adjust_nvar_warn("clnlbeam", n_orig, n) h = 1 / N alpha = 350 model = Model() diff --git a/src/PureJuMP/dixmaan_efgh.jl b/src/PureJuMP/dixmaan_efgh.jl index 47c5b5f2e..f25edac80 100644 --- a/src/PureJuMP/dixmaan_efgh.jl +++ b/src/PureJuMP/dixmaan_efgh.jl @@ -36,7 +36,7 @@ function dixmaane( n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaan", n_orig, n) + @adjust_nvar_warn("dixmaane", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_ijkl.jl b/src/PureJuMP/dixmaan_ijkl.jl index 004e62f2e..e4f22ed05 100644 --- a/src/PureJuMP/dixmaan_ijkl.jl +++ b/src/PureJuMP/dixmaan_ijkl.jl @@ -36,7 +36,7 @@ function dixmaani( n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaan", n_orig, n) + @adjust_nvar_warn("dixmaani", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_mnop.jl b/src/PureJuMP/dixmaan_mnop.jl index 4ac9bf7c4..0c676cc59 100644 --- a/src/PureJuMP/dixmaan_mnop.jl +++ b/src/PureJuMP/dixmaan_mnop.jl @@ -34,7 +34,7 @@ function dixmaanm( n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaan", n_orig, n) + @adjust_nvar_warn("dixmaanm", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/elec.jl b/src/PureJuMP/elec.jl index a1f0ca3db..1a9155327 100644 --- a/src/PureJuMP/elec.jl +++ b/src/PureJuMP/elec.jl @@ -11,7 +11,10 @@ export elec function elec(args...; n::Int = default_nvar, kwargs...) 
- n = max(2, div(n, 3)) + n_orig = n + m = max(2, div(n_orig, 3)) + n = 3 * m + @adjust_nvar_warn("elec", n_orig, n) nlp = Model() diff --git a/src/PureJuMP/gasoil.jl b/src/PureJuMP/gasoil.jl index c39531f3c..cbd55c83e 100644 --- a/src/PureJuMP/gasoil.jl +++ b/src/PureJuMP/gasoil.jl @@ -8,7 +8,10 @@ export gasoil function gasoil(; n::Int = default_nvar, kwargs...) + n_orig = n nc = 4 # number of collocation points + n = 26 * n_orig + 3 + @adjust_nvar_warn("gasoil", n_orig, n) ne = 2 # number of differential equations np = 3 # number of ODE parameters nm = 21 # number of measurements diff --git a/src/PureJuMP/glider.jl b/src/PureJuMP/glider.jl index 9d217b716..0159130d9 100644 --- a/src/PureJuMP/glider.jl +++ b/src/PureJuMP/glider.jl @@ -8,7 +8,10 @@ export glider function glider(; n::Int = default_nvar, kwargs...) + n_orig = n # Design parameters + n = 5 * n_orig + 6 + @adjust_nvar_warn("glider", n_orig, n) x_0 = 0.0 y_0 = 1000.0 y_f = 900.0 diff --git a/src/PureJuMP/hovercraft1d.jl b/src/PureJuMP/hovercraft1d.jl index 0ddce6c3f..f98eacb56 100644 --- a/src/PureJuMP/hovercraft1d.jl +++ b/src/PureJuMP/hovercraft1d.jl @@ -6,9 +6,12 @@ export hovercraft1d function hovercraft1d(args...; n::Int = default_nvar, kwargs...) + n_orig = n nlp = Model() - T = div(n, 3) # length of time horizon + T = div(n_orig, 3) # length of time horizon + n = 3 * T - 1 + @adjust_nvar_warn("hovercraft1d", n_orig, n) @variable(nlp, x[1:T]) # resulting position @variable(nlp, v[1:T]) # resulting velocity @variable(nlp, u[1:(T - 1)]) # thruster input diff --git a/src/PureJuMP/marine.jl b/src/PureJuMP/marine.jl index afd07b22d..462d775a7 100644 --- a/src/PureJuMP/marine.jl +++ b/src/PureJuMP/marine.jl @@ -18,14 +18,17 @@ export marine function marine(args...; n::Int = default_nvar, nc::Int = 1, kwargs...) 
+ n_orig = n nlp = Model() nc = max(min(nc, 4), 1) # number of collocation points ne = 8 # number of differential equations nm = 21 # number of measurements - n = max(n, 3 * ne * nc + ne + 2 * ne) + n = max(n_orig, 3 * ne * nc + ne + 2 * ne) nh = Int(round((n - 2 * ne + 1) / (3 * ne * nc + ne))) # number of partition intervals + n = 8 + 7 + nh * (8 + 3 * 8 * nc) + @adjust_nvar_warn("marine", n_orig, n) # roots of k-th degree Legendre polynomial rho = if nc == 1 diff --git a/src/PureJuMP/methanol.jl b/src/PureJuMP/methanol.jl index f27f055ca..81223f054 100644 --- a/src/PureJuMP/methanol.jl +++ b/src/PureJuMP/methanol.jl @@ -8,6 +8,7 @@ export methanol function methanol(args...; n::Int = default_nvar, kwargs...) + n_orig = n ne = 3 np = 5 nc = 3 @@ -35,6 +36,8 @@ function methanol(args...; n::Int = default_nvar, kwargs...) 1.122, ] tf = tau[nm] # ODEs defined in [0,tf] + n = 30 * n_orig + 5 + @adjust_nvar_warn("methanol", n_orig, n) h = tf / n # uniform interval length t = [(i-1)*h for i = 1:(n + 1)] # partition fact = [factorial(k) for k = 0:nc] diff --git a/src/PureJuMP/minsurf.jl b/src/PureJuMP/minsurf.jl index a50e97f15..f755d60bf 100644 --- a/src/PureJuMP/minsurf.jl +++ b/src/PureJuMP/minsurf.jl @@ -12,10 +12,13 @@ export minsurf function minsurf(args...; n = default_nvar, kwargs...) 
+ n_orig = n # number of variables is (nx + 2) x (ny + 2) if !((:nx in keys(kwargs)) & (:ny in keys(kwargs))) - nx, ny = Int(round(sqrt(max(1, n - 2)))), Int(round(sqrt(max(1, n - 2)))) + nx, ny = Int(round(sqrt(max(1, n_orig - 2)))), Int(round(sqrt(max(1, n_orig - 2)))) end + n = (nx + 2) * (ny + 2) + @adjust_nvar_warn("minsurf", n_orig, n) x_mesh = LinRange(0, 1, nx + 2) # coordinates of the mesh points x v0 = zeros(nx + 2, ny + 2) # Surface matrix initialization diff --git a/src/PureJuMP/pinene.jl b/src/PureJuMP/pinene.jl index 347fa83d5..a89942765 100644 --- a/src/PureJuMP/pinene.jl +++ b/src/PureJuMP/pinene.jl @@ -13,6 +13,7 @@ export pinene function pinene(; n::Int = default_nvar, kwargs...) + n_orig = n nc = 3 # number of collocation points ne = 5 # number of differential equations np = 5 # number of ODE parameters @@ -25,6 +26,8 @@ function pinene(; n::Int = default_nvar, kwargs...) # times at which observations made tau = [1230.0, 3060.0, 4920.0, 7800.0, 10680.0, 15030.0, 22620.0, 36420.0] tf = tau[nm] # ODEs defined in [0,tf] + n = 50 * n_orig + 5 + @adjust_nvar_warn("pinene", n_orig, n) h = tf / n # uniform interval length t = [(i-1)*h for i = 1:(n + 1)] # partition diff --git a/src/PureJuMP/robotarm.jl b/src/PureJuMP/robotarm.jl index b6252bd90..950187b4a 100644 --- a/src/PureJuMP/robotarm.jl +++ b/src/PureJuMP/robotarm.jl @@ -14,8 +14,11 @@ export robotarm function robotarm(; n::Int = default_nvar, L = 4.5, kwargs...) - N = max(2, div(n, 9)) + n_orig = n + N = max(2, div(n_orig, 9)) n = N + 1 + nvars = 9 * n + 1 + @adjust_nvar_warn("robotarm", n_orig, nvars) nlp = Model() diff --git a/src/PureJuMP/rocket.jl b/src/PureJuMP/rocket.jl index 56ab5a499..c824cdeca 100644 --- a/src/PureJuMP/rocket.jl +++ b/src/PureJuMP/rocket.jl @@ -7,7 +7,10 @@ export rocket function rocket(; n::Int = default_nvar, kwargs...) 
+ n_orig = n h_0 = 1.0 + nvars = 4 * n_orig + 5 + @adjust_nvar_warn("rocket", n_orig, nvars) v_0 = 0.0 m_0 = 1.0 g_0 = 1.0 diff --git a/src/PureJuMP/steering.jl b/src/PureJuMP/steering.jl index 353be640b..9a72508a0 100644 --- a/src/PureJuMP/steering.jl +++ b/src/PureJuMP/steering.jl @@ -7,7 +7,10 @@ export steering function steering(; n::Int = default_nvar, kwargs...) + n_orig = n a = 100.0 # Magnitude of force. + nvars = 5 * n_orig + 6 + @adjust_nvar_warn("steering", n_orig, nvars) # Bounds on the control u_min, u_max = -pi/2.0, pi/2.0 xs = zeros(4) diff --git a/src/PureJuMP/structural.jl b/src/PureJuMP/structural.jl index 65ba54d47..4bfbb8184 100644 --- a/src/PureJuMP/structural.jl +++ b/src/PureJuMP/structural.jl @@ -6,7 +6,8 @@ export structural function structural(args...; n::Int = default_nvar, kwargs...) - n = max(n, 100) + n_orig = n + n = max(n_orig, 100) sub2ind(shape, a, b) = LinearIndices(shape)[CartesianIndex.(a, b)] Nx = min(Int(round(n^(1 / 3))), 6) @@ -29,6 +30,9 @@ function structural(args...; n::Int = default_nvar, kwargs...) M = Int(N * (N - 1) / 2) # number of edges + nvars = 2 * M + @adjust_nvar_warn("structural", n_orig, nvars) + # EDGES: columns are the indices of the nodes at either end edges = Array{Int}(zeros(M, 2)) diff --git a/src/PureJuMP/torsion.jl b/src/PureJuMP/torsion.jl index 926d1e456..8fee5c164 100644 --- a/src/PureJuMP/torsion.jl +++ b/src/PureJuMP/torsion.jl @@ -7,9 +7,12 @@ export torsion function torsion(args...; n = default_nvar, kwargs...) 
# number of variables is (nx + 1) x (ny + 1) + n_orig = n if !((:nx in keys(kwargs)) & (:ny in keys(kwargs))) - nx, ny = Int(round(sqrt(max(1, n - 2)))), Int(round(sqrt(max(1, n - 2)))) + nx, ny = Int(round(sqrt(max(1, n_orig - 2)))), Int(round(sqrt(max(1, n_orig - 2)))) end + nvars = (nx + 2) * (ny + 2) + @adjust_nvar_warn("torsion", n_orig, nvars) c = 5.0 hx = 1.0 / (nx + 1.0) # grid spacing hy = 1.0 / (ny + 1.0) # grid spacing From 1858c62793ca867db3facee4ea3fe0c2514bbd9d Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Mon, 4 May 2026 15:10:13 +0530 Subject: [PATCH 20/31] addressing review comments --- test/test-defined-problems.jl | 37 +++-------------------------------- 1 file changed, 3 insertions(+), 34 deletions(-) diff --git a/test/test-defined-problems.jl b/test/test-defined-problems.jl index 175cbd29f..4db6dbdd0 100644 --- a/test/test-defined-problems.jl +++ b/test/test-defined-problems.jl @@ -23,60 +23,29 @@ end @info "PureJuMP missing per worker" probes @testset "Adjusted dimension warnings" begin - # Get all scalable problems from the metadata registry var_probs = OptimizationProblems.meta[OptimizationProblems.meta.variable_nvar, :name] @test !isempty(var_probs) - # Filter to only problems that actually use the @adjust_nvar_warn macro - src_root = joinpath(@__DIR__, "..", "src") - probs_with_macro = Set{String}() - for subdir in ("ADNLPProblems", "PureJuMP") - for file in readdir(joinpath(src_root, subdir)) - endswith(file, ".jl") || continue - source = read(joinpath(src_root, subdir, file), String) - if occursin("@adjust_nvar_warn", source) - stem = first(splitext(file)) - push!(probs_with_macro, stem) - end - end - end - - # Test each problem that uses the macro - for prob_name in sort(collect(probs_with_macro)) + for prob_name in var_probs prob_sym = Symbol(prob_name) - # Check if problem is actually in the registry and scalable - prob_name in var_probs || continue - get_nvar_func = getfield(OptimizationProblems, Symbol("get_", prob_name, "_nvar")) - 
# Try standard test dimensions for n in (50, 100, 150) n_adjusted = get_nvar_func(; n = n) n_adjusted == n && continue # Skip if no adjustment for this n - # Found an adjustment - test it msg_re = Regex("number of variables adjusted from $(n) to $(n_adjusted)") for mod in (ADNLPProblems, PureJuMP) isdefined(mod, prob_sym) || continue constructor = getfield(mod, prob_sym) - - try - # Try to verify the model can be constructed with adjusted size - _ = constructor(; n = n_adjusted) - catch - continue # Skip if construction fails - end - # Test that warning is emitted - @test_logs (:warn, msg_re) constructor(; n = n) - # Test that no warning when using adjusted size - @test_nowarn constructor(; n = n_adjusted) + @test_logs (:warn, msg_re) constructor(; n = n) end - break # Move to next problem after testing one adjustment + break end end end From 528a616fa3668f347718a99ff406c630d7905da7 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Mon, 4 May 2026 15:49:32 +0530 Subject: [PATCH 21/31] missing macro --- src/ADNLPProblems/marine.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/ADNLPProblems/marine.jl b/src/ADNLPProblems/marine.jl index aa96414d9..0ddcdefc0 100644 --- a/src/ADNLPProblems/marine.jl +++ b/src/ADNLPProblems/marine.jl @@ -1,12 +1,15 @@ export marine function marine(; n::Int = default_nvar, nc::Int = 1, type::Type{T} = Float64, kwargs...) 
where {T} + n_orig = n nc = max(min(nc, 4), 1) # number of collocation points ne = 8 # number of differential equations nm = 21 # number of measurements n = max(n, 3 * ne * nc + ne + 2 * ne) nh = Int(round((n - 2 * ne + 1) / (3 * ne * nc + ne))) # number of partition intervals + n = 8 + 7 + nh * (8 + 3 * 8 * nc) + @adjust_nvar_warn("marine", n_orig, n) # roots of k-th degree Legendre polynomial rho = if nc == 1 From f60efc25b69ca8de7bdb877bbb15221665379559 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Tue, 5 May 2026 04:45:28 +0530 Subject: [PATCH 22/31] fixing failing checks --- src/ADNLPProblems/elec.jl | 20 ++++++++++---------- src/PureJuMP/catmix.jl | 4 ++-- src/PureJuMP/elec.jl | 18 +++++++++--------- src/PureJuMP/gasoil.jl | 4 ++-- src/PureJuMP/glider.jl | 4 ++-- src/PureJuMP/methanol.jl | 4 ++-- src/PureJuMP/pinene.jl | 4 ++-- 7 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/ADNLPProblems/elec.jl b/src/ADNLPProblems/elec.jl index d319fd5c1..11a6a30e3 100644 --- a/src/ADNLPProblems/elec.jl +++ b/src/ADNLPProblems/elec.jl @@ -9,31 +9,31 @@ function elec(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where function f(x; n = n) return sum( sum( - 1 / sqrt((x[j] - x[i])^2 + (x[n + j] - x[n + i])^2 + (x[2n + j] - x[2n + i])^2) for - j = (i + 1):n - ) for i = 1:(n - 1) + 1 / sqrt((x[j] - x[i])^2 + (x[m + j] - x[m + i])^2 + (x[2m + j] - x[2m + i])^2) for + j = (i + 1):m + ) for i = 1:(m - 1) ) end # Define the constraints on these points (sum of the square of the coordinates = 1) function c!(cx, x; n = n) - for k = 1:n - cx[k] = x[k]^2 + x[n + k]^2 + x[2n + k]^2 + for k = 1:m + cx[k] = x[k]^2 + x[m + k]^2 + x[2m + k]^2 end return cx end # bounds on the constraints - lcon = ucon = ones(T, n) + lcon = ucon = ones(T, m) # building a feasible x0 - range0 = T[i / n for i = 1:n] + range0 = T[i / m for i = 1:m] θ0 = 2π .* range0 ϕ0 = π .* range0 - xini = T[sin(θ0[i]) * cos(ϕ0[i]) for i = 1:n] # x coordinate - yini = T[sin(θ0[i]) * sin(ϕ0[i]) for i = 1:n] # y coordinate - zini = T[cos(θ0[i]) for i = 1:n] # z coordinate + xini = T[sin(θ0[i]) * cos(ϕ0[i]) for i = 1:m] # x coordinate + yini = T[sin(θ0[i]) * sin(ϕ0[i]) for i = 1:m] # y coordinate + zini = T[cos(θ0[i]) for i = 1:m] # z coordinate x0 = [xini; yini; zini] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "elec"; kwargs...) diff --git a/src/PureJuMP/catmix.jl b/src/PureJuMP/catmix.jl index bdc68fefc..ca118b4d8 100644 --- a/src/PureJuMP/catmix.jl +++ b/src/PureJuMP/catmix.jl @@ -9,8 +9,8 @@ function catmix(args...; n::Int = default_nvar, kwargs...) n_orig = n ne = 2 nc = 3 - n = 23 * n_orig + 2 - @adjust_nvar_warn("catmix", n_orig, n) + n = n_orig + @adjust_nvar_warn("catmix", n_orig, 23 * n + 2) tf = 1 h = tf / n # Final time diff --git a/src/PureJuMP/elec.jl b/src/PureJuMP/elec.jl index 1a9155327..0d7f690ae 100644 --- a/src/PureJuMP/elec.jl +++ b/src/PureJuMP/elec.jl @@ -18,29 +18,29 @@ function elec(args...; n::Int = default_nvar, kwargs...) 
nlp = Model() - range0 = [i / n for i = 1:n] + range0 = [i / m for i = 1:m] θ0 = 2π .* range0 ϕ0 = π .* range0 - xini = [sin(θ0[i]) * cos(ϕ0[i]) for i = 1:n] # x coordinate - yini = [sin(θ0[i]) * sin(ϕ0[i]) for i = 1:n] # y coordinate - zini = [cos(θ0[i]) for i = 1:n] # z coordinate + xini = [sin(θ0[i]) * cos(ϕ0[i]) for i = 1:m] # x coordinate + yini = [sin(θ0[i]) * sin(ϕ0[i]) for i = 1:m] # y coordinate + zini = [cos(θ0[i]) for i = 1:m] # z coordinate x0 = [xini; yini; zini] - @variable(nlp, x[i = 1:(3n)], start = x0[i]) + @variable(nlp, x[i = 1:(3m)], start = x0[i]) @objective( nlp, Min, sum( sum( - 1 / sqrt((x[j] - x[i])^2 + (x[n + j] - x[n + i])^2 + (x[2n + j] - x[2n + i])^2) for - j = (i + 1):n - ) for i = 1:(n - 1) + 1 / sqrt((x[j] - x[i])^2 + (x[m + j] - x[m + i])^2 + (x[2m + j] - x[2m + i])^2) for + j = (i + 1):m + ) for i = 1:(m - 1) ) ) - @constraint(nlp, [k = 1:n], x[k]^2 + x[n + k]^2 + x[2n + k]^2 == 1) + @constraint(nlp, [k = 1:m], x[k]^2 + x[m + k]^2 + x[2m + k]^2 == 1) return nlp end diff --git a/src/PureJuMP/gasoil.jl b/src/PureJuMP/gasoil.jl index cbd55c83e..577fa93f8 100644 --- a/src/PureJuMP/gasoil.jl +++ b/src/PureJuMP/gasoil.jl @@ -10,8 +10,8 @@ export gasoil function gasoil(; n::Int = default_nvar, kwargs...) n_orig = n nc = 4 # number of collocation points - n = 26 * n_orig + 3 - @adjust_nvar_warn("gasoil", n_orig, n) + n = n_orig + @adjust_nvar_warn("gasoil", n_orig, 26 * n + 3) ne = 2 # number of differential equations np = 3 # number of ODE parameters nm = 21 # number of measurements diff --git a/src/PureJuMP/glider.jl b/src/PureJuMP/glider.jl index 0159130d9..af1d0c3cf 100644 --- a/src/PureJuMP/glider.jl +++ b/src/PureJuMP/glider.jl @@ -10,8 +10,8 @@ export glider function glider(; n::Int = default_nvar, kwargs...) 
n_orig = n # Design parameters - n = 5 * n_orig + 6 - @adjust_nvar_warn("glider", n_orig, n) + n = n_orig + @adjust_nvar_warn("glider", n_orig, 5 * n + 6) x_0 = 0.0 y_0 = 1000.0 y_f = 900.0 diff --git a/src/PureJuMP/methanol.jl b/src/PureJuMP/methanol.jl index 81223f054..2e70e2309 100644 --- a/src/PureJuMP/methanol.jl +++ b/src/PureJuMP/methanol.jl @@ -36,8 +36,8 @@ function methanol(args...; n::Int = default_nvar, kwargs...) 1.122, ] tf = tau[nm] # ODEs defined in [0,tf] - n = 30 * n_orig + 5 - @adjust_nvar_warn("methanol", n_orig, n) + n = n_orig + @adjust_nvar_warn("methanol", n_orig, 30 * n + 5) h = tf / n # uniform interval length t = [(i-1)*h for i = 1:(n + 1)] # partition fact = [factorial(k) for k = 0:nc] diff --git a/src/PureJuMP/pinene.jl b/src/PureJuMP/pinene.jl index a89942765..314813113 100644 --- a/src/PureJuMP/pinene.jl +++ b/src/PureJuMP/pinene.jl @@ -26,8 +26,8 @@ function pinene(; n::Int = default_nvar, kwargs...) # times at which observations made tau = [1230.0, 3060.0, 4920.0, 7800.0, 10680.0, 15030.0, 22620.0, 36420.0] tf = tau[nm] # ODEs defined in [0,tf] - n = 50 * n_orig + 5 - @adjust_nvar_warn("pinene", n_orig, n) + n = n_orig + @adjust_nvar_warn("pinene", n_orig, 50 * n + 5) h = tf / n # uniform interval length t = [(i-1)*h for i = 1:(n + 1)] # partition From 71efe31cda9ff3021401ca56f56d96c28535a2af Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sat, 9 May 2026 07:16:19 +0530 Subject: [PATCH 23/31] reverting elec.jl to the version before the dimension warnings were added. The changes in this commit are to fix the dimension warnings that were introduced in the previous commit. The changes include changing the bounds on the constraints, building a feasible x0, and changing the number of variables and constraints in the model. The changes are made in both the ADNLPProblems and PureJuMP versions of elec.jl. 
--- src/ADNLPProblems/elec.jl | 20 ++++++++++---------- src/PureJuMP/elec.jl | 18 +++++++++--------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/ADNLPProblems/elec.jl b/src/ADNLPProblems/elec.jl index 11a6a30e3..133111007 100644 --- a/src/ADNLPProblems/elec.jl +++ b/src/ADNLPProblems/elec.jl @@ -9,31 +9,31 @@ function elec(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where function f(x; n = n) return sum( sum( - 1 / sqrt((x[j] - x[i])^2 + (x[m + j] - x[m + i])^2 + (x[2m + j] - x[2m + i])^2) for - j = (i + 1):m - ) for i = 1:(m - 1) + 1 / sqrt((x[j] - x[i])^2 + (x[n + j] - x[n + i])^2 + (x[2n + j] - x[2n + i])^2) for + j = (i + 1):n + ) for i = 1:(n - 1) ) end # Define the constraints on these points (sum of the square of the coordinates = 1) function c!(cx, x; n = n) - for k = 1:m - cx[k] = x[k]^2 + x[m + k]^2 + x[2m + k]^2 + for k = 1:n + cx[k] = x[k]^2 + x[n + k]^2 + x[2n + k]^2 end return cx end # bounds on the constraints - lcon = ucon = ones(T, m) + lcon = ucon = ones(T, n) # building a feasible x0 - range0 = T[i / m for i = 1:m] + range0 = T[i / n for i = 1:n] θ0 = 2π .* range0 ϕ0 = π .* range0 - xini = T[sin(θ0[i]) * cos(ϕ0[i]) for i = 1:m] # x coordinate - yini = T[sin(θ0[i]) * sin(ϕ0[i]) for i = 1:m] # y coordinate - zini = T[cos(θ0[i]) for i = 1:m] # z coordinate + xini = T[sin(θ0[i]) * cos(ϕ0[i]) for i = 1:n] # x coordinate + yini = T[sin(θ0[i]) * sin(ϕ0[i]) for i = 1:n] # y coordinate + zini = T[cos(θ0[i]) for i = 1:n] # z coordinate x0 = [xini; yini; zini] return ADNLPModels.ADNLPModel!(f, x0, c!, lcon, ucon, name = "elec"; kwargs...) diff --git a/src/PureJuMP/elec.jl b/src/PureJuMP/elec.jl index 0d7f690ae..1a9155327 100644 --- a/src/PureJuMP/elec.jl +++ b/src/PureJuMP/elec.jl @@ -18,29 +18,29 @@ function elec(args...; n::Int = default_nvar, kwargs...) 
nlp = Model() - range0 = [i / m for i = 1:m] + range0 = [i / n for i = 1:n] θ0 = 2π .* range0 ϕ0 = π .* range0 - xini = [sin(θ0[i]) * cos(ϕ0[i]) for i = 1:m] # x coordinate - yini = [sin(θ0[i]) * sin(ϕ0[i]) for i = 1:m] # y coordinate - zini = [cos(θ0[i]) for i = 1:m] # z coordinate + xini = [sin(θ0[i]) * cos(ϕ0[i]) for i = 1:n] # x coordinate + yini = [sin(θ0[i]) * sin(ϕ0[i]) for i = 1:n] # y coordinate + zini = [cos(θ0[i]) for i = 1:n] # z coordinate x0 = [xini; yini; zini] - @variable(nlp, x[i = 1:(3m)], start = x0[i]) + @variable(nlp, x[i = 1:(3n)], start = x0[i]) @objective( nlp, Min, sum( sum( - 1 / sqrt((x[j] - x[i])^2 + (x[m + j] - x[m + i])^2 + (x[2m + j] - x[2m + i])^2) for - j = (i + 1):m - ) for i = 1:(m - 1) + 1 / sqrt((x[j] - x[i])^2 + (x[n + j] - x[n + i])^2 + (x[2n + j] - x[2n + i])^2) for + j = (i + 1):n + ) for i = 1:(n - 1) ) ) - @constraint(nlp, [k = 1:m], x[k]^2 + x[m + k]^2 + x[2m + k]^2 == 1) + @constraint(nlp, [k = 1:n], x[k]^2 + x[n + k]^2 + x[2n + k]^2 == 1) return nlp end From dd9d82199310fbe0c5c129bf3adce41b648478e9 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sat, 9 May 2026 07:51:36 +0530 Subject: [PATCH 24/31] passing elec.jl macro --- src/ADNLPProblems/elec.jl | 4 ++-- src/PureJuMP/elec.jl | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ADNLPProblems/elec.jl b/src/ADNLPProblems/elec.jl index 133111007..b439f7e88 100644 --- a/src/ADNLPProblems/elec.jl +++ b/src/ADNLPProblems/elec.jl @@ -3,8 +3,8 @@ export elec function elec(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} n_orig = n m = max(2, div(n_orig, 3)) - n = 3 * m - @adjust_nvar_warn("elec", n_orig, n) + n = m + @adjust_nvar_warn("elec", n_orig, 3 * n) # Define the objective function to minimize function f(x; n = n) return sum( diff --git a/src/PureJuMP/elec.jl b/src/PureJuMP/elec.jl index 1a9155327..ce9a88e84 100644 --- a/src/PureJuMP/elec.jl +++ b/src/PureJuMP/elec.jl @@ -13,8 +13,8 @@ export elec function elec(args...; n::Int = default_nvar, kwargs...) n_orig = n m = max(2, div(n_orig, 3)) - n = 3 * m - @adjust_nvar_warn("elec", n_orig, n) + n = m + @adjust_nvar_warn("elec", n_orig, 3 * n) nlp = Model() From 0a149f2f65c0c0595a32b16c44a93eefc8be2d4f Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sat, 9 May 2026 13:25:58 +0530 Subject: [PATCH 25/31] Update elec.jl --- src/ADNLPProblems/elec.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ADNLPProblems/elec.jl b/src/ADNLPProblems/elec.jl index b439f7e88..3aba48d8c 100644 --- a/src/ADNLPProblems/elec.jl +++ b/src/ADNLPProblems/elec.jl @@ -17,8 +17,8 @@ function elec(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where # Define the constraints on these points (sum of the square of the coordinates = 1) function c!(cx, x; n = n) - for k = 1:n - cx[k] = x[k]^2 + x[n + k]^2 + x[2n + k]^2 + for k = 1:n + cx[k] = x[k]^2 + x[n + k]^2 + x[2n + k]^2 end return cx end From 26c75d031ff83d7e9ac9722d780c1fe3b845eb43 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sun, 10 May 2026 20:57:57 +0530 Subject: [PATCH 26/31] Apply suggestions from code review Co-authored-by: Tangi Migot --- src/PureJuMP/rocket.jl | 4 +--- src/PureJuMP/steering.jl | 4 +--- src/PureJuMP/torsion.jl | 5 ++--- test/test-defined-problems.jl | 7 ++----- 4 files changed, 6 insertions(+), 14 deletions(-) diff --git a/src/PureJuMP/rocket.jl b/src/PureJuMP/rocket.jl index c824cdeca..0d71255ee 100644 --- a/src/PureJuMP/rocket.jl +++ b/src/PureJuMP/rocket.jl @@ -7,10 +7,8 @@ export rocket function rocket(; n::Int = default_nvar, kwargs...) - n_orig = n h_0 = 1.0 - nvars = 4 * n_orig + 5 - @adjust_nvar_warn("rocket", n_orig, nvars) + @adjust_nvar_warn("rocket", n, 4 * n + 5) v_0 = 0.0 m_0 = 1.0 g_0 = 1.0 diff --git a/src/PureJuMP/steering.jl b/src/PureJuMP/steering.jl index 9a72508a0..a9d9e5c1d 100644 --- a/src/PureJuMP/steering.jl +++ b/src/PureJuMP/steering.jl @@ -7,10 +7,8 @@ export steering function steering(; n::Int = default_nvar, kwargs...) - n_orig = n a = 100.0 # Magnitude of force. - nvars = 5 * n_orig + 6 - @adjust_nvar_warn("steering", n_orig, nvars) + @adjust_nvar_warn("steering", n, 5 * n_orig + 6) # Bounds on the control u_min, u_max = -pi/2.0, pi/2.0 xs = zeros(4) diff --git a/src/PureJuMP/torsion.jl b/src/PureJuMP/torsion.jl index 8fee5c164..19d175ee3 100644 --- a/src/PureJuMP/torsion.jl +++ b/src/PureJuMP/torsion.jl @@ -9,10 +9,9 @@ function torsion(args...; n = default_nvar, kwargs...) 
# number of variables is (nx + 1) x (ny + 1) n_orig = n if !((:nx in keys(kwargs)) & (:ny in keys(kwargs))) - nx, ny = Int(round(sqrt(max(1, n_orig - 2)))), Int(round(sqrt(max(1, n_orig - 2)))) + nx, ny = Int(round(sqrt(max(1, n - 2)))), Int(round(sqrt(max(1, n - 2)))) end - nvars = (nx + 2) * (ny + 2) - @adjust_nvar_warn("torsion", n_orig, nvars) + @adjust_nvar_warn("torsion", n_orig, (nx + 2) * (ny + 2)) c = 5.0 hx = 1.0 / (nx + 1.0) # grid spacing hy = 1.0 / (ny + 1.0) # grid spacing diff --git a/test/test-defined-problems.jl b/test/test-defined-problems.jl index 4db6dbdd0..c99ca714e 100644 --- a/test/test-defined-problems.jl +++ b/test/test-defined-problems.jl @@ -24,14 +24,13 @@ end @testset "Adjusted dimension warnings" begin var_probs = OptimizationProblems.meta[OptimizationProblems.meta.variable_nvar, :name] - @test !isempty(var_probs) for prob_name in var_probs prob_sym = Symbol(prob_name) get_nvar_func = getfield(OptimizationProblems, Symbol("get_", prob_name, "_nvar")) - for n in (50, 100, 150) + for n in (50, 100) n_adjusted = get_nvar_func(; n = n) n_adjusted == n && continue # Skip if no adjustment for this n @@ -39,10 +38,8 @@ end for mod in (ADNLPProblems, PureJuMP) isdefined(mod, prob_sym) || continue - constructor = getfield(mod, prob_sym) - - @test_logs (:warn, msg_re) constructor(; n = n) + @test_logs (:warn, msg_re) constructor(; n = n) end break From f618ef1c423419d731aff92525abcafc231fe55f Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sun, 10 May 2026 15:54:37 +0530 Subject: [PATCH 27/31] ADNLProblems changes --- src/ADNLPProblems/NZF1.jl | 6 ++---- src/ADNLPProblems/bearing.jl | 4 +--- src/ADNLPProblems/broydn7d.jl | 3 +-- src/ADNLPProblems/catenary.jl | 3 +-- src/ADNLPProblems/chain.jl | 4 +--- src/ADNLPProblems/chainwoo.jl | 6 ++---- src/ADNLPProblems/channel.jl | 4 +--- src/ADNLPProblems/clnlbeam.jl | 4 +--- src/ADNLPProblems/clplatea.jl | 3 +-- src/ADNLPProblems/clplateb.jl | 3 +-- src/ADNLPProblems/clplatec.jl | 3 +-- 
src/ADNLPProblems/dixmaan_efgh.jl | 3 +-- src/ADNLPProblems/dixmaan_ijkl.jl | 3 +-- src/ADNLPProblems/dixmaan_mnop.jl | 3 +-- src/ADNLPProblems/elec.jl | 6 ++---- src/ADNLPProblems/fminsrf2.jl | 4 ++-- src/ADNLPProblems/hovercraft1d.jl | 12 ++++-------- src/ADNLPProblems/marine.jl | 4 +--- src/ADNLPProblems/powellsg.jl | 10 +++------- src/ADNLPProblems/robotarm.jl | 6 ++---- src/ADNLPProblems/spmsrtls.jl | 6 ++---- src/ADNLPProblems/srosenbr.jl | 3 +-- src/ADNLPProblems/structural.jl | 7 ++----- src/ADNLPProblems/watson.jl | 6 ++---- src/ADNLPProblems/woods.jl | 3 +-- src/PureJuMP/catmix.jl | 3 +-- src/PureJuMP/elec.jl | 3 +-- src/PureJuMP/gasoil.jl | 3 +-- src/PureJuMP/glider.jl | 4 ++-- src/PureJuMP/pinene.jl | 4 ++-- 30 files changed, 45 insertions(+), 91 deletions(-) diff --git a/src/ADNLPProblems/NZF1.jl b/src/ADNLPProblems/NZF1.jl index 5edd35f6f..a7b3fab60 100644 --- a/src/ADNLPProblems/NZF1.jl +++ b/src/ADNLPProblems/NZF1.jl @@ -6,10 +6,9 @@ function NZF1(; use_nls::Bool = false, kwargs...) end function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis - @adjust_nvar_warn("NZF1", n_orig, n) + @adjust_nvar_warn("NZF1", n, n) l = div(n, 13) function f(x; l = l) return sum( @@ -31,10 +30,9 @@ function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg end function NZF1(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis - @adjust_nvar_warn("NZF1", n_orig, n) + @adjust_nvar_warn("NZF1", n, n) l = div(n, 13) function F!(r, x; l = l) for i = 1:l diff --git a/src/ADNLPProblems/bearing.jl b/src/ADNLPProblems/bearing.jl index 238d07e30..95d0d3b4f 100644 --- a/src/ADNLPProblems/bearing.jl +++ b/src/ADNLPProblems/bearing.jl @@ -10,11 +10,9 @@ function bearing(; # nx > 0 # grid points in 1st direction # ny > 0 # grid points in 2nd direction - n_orig = n nx = max(1, nx) ny = max(1, ny) - n = (nx + 2) * (ny + 2) - @adjust_nvar_warn("bearing", n_orig, n) + @adjust_nvar_warn("bearing", n, (nx + 2) * (ny + 2)) b = 10 # grid is (0,2*pi)x(0,2*b) e = 1 // 10 # eccentricity diff --git a/src/ADNLPProblems/broydn7d.jl b/src/ADNLPProblems/broydn7d.jl index d76b8fa11..2c2f1ef2e 100644 --- a/src/ADNLPProblems/broydn7d.jl +++ b/src/ADNLPProblems/broydn7d.jl @@ -1,10 +1,9 @@ export broydn7d function broydn7d(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n n2 = max(1, div(n, 2)) n = 2 * n2 - @adjust_nvar_warn("broydn7d", n_orig, n) + @adjust_nvar_warn("broydn7d", n, n) function f(x; n = length(x), n2 = n2) p = 7 // 3 return abs(1 - 2 * x[2] + (3 - x[1] / 2) * x[1])^p + diff --git a/src/ADNLPProblems/catenary.jl b/src/ADNLPProblems/catenary.jl index 345a0bdc3..c56207fd8 100644 --- a/src/ADNLPProblems/catenary.jl +++ b/src/ADNLPProblems/catenary.jl @@ -8,10 +8,9 @@ function catenary( FRACT = 0.6, kwargs..., ) where {T} - n_orig = n n = 3 * max(1, div(n, 3)) n = max(n, 6) - @adjust_nvar_warn("catenary", n_orig, n) + @adjust_nvar_warn("catenary", n, n) ## Model Parameters N = div(n, 3) - 2 diff --git a/src/ADNLPProblems/chain.jl b/src/ADNLPProblems/chain.jl index 5d0db8cb1..9ebea3ed6 100644 --- a/src/ADNLPProblems/chain.jl +++ b/src/ADNLPProblems/chain.jl @@ -1,10 +1,8 @@ export chain function chain(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - n_orig = n nh = max(2, div(n - 4, 4)) - n = 4 * nh + 4 - @adjust_nvar_warn("chain", n_orig, n) + @adjust_nvar_warn("chain", n, 4 * nh + 4) L = 4 a = 1 diff --git a/src/ADNLPProblems/chainwoo.jl b/src/ADNLPProblems/chainwoo.jl index 154b71ba8..db17942e3 100644 --- a/src/ADNLPProblems/chainwoo.jl +++ b/src/ADNLPProblems/chainwoo.jl @@ -6,9 +6,8 @@ function chainwoo(; use_nls::Bool = false, kwargs...) end function chainwoo(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("chainwoo", n_orig, n) + @adjust_nvar_warn("chainwoo", n, n) function f(x; n = length(x)) return 1 + sum( 100 * (x[2 * i] - x[2 * i - 1]^2)^2 + @@ -24,9 +23,8 @@ function chainwoo(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function chainwoo(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("chainwoo", n_orig, n) + @adjust_nvar_warn("chainwoo", n, n) function F!(r, x; n = length(x)) nb = div(n, 2) - 1 r[1] = 1 diff --git a/src/ADNLPProblems/channel.jl b/src/ADNLPProblems/channel.jl index 718d0bf7f..69bdc1644 100644 --- a/src/ADNLPProblems/channel.jl +++ b/src/ADNLPProblems/channel.jl @@ -1,10 +1,8 @@ export channel function channel(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n nh = max(2, div(n, 8)) - n = 8 * nh - @adjust_nvar_warn("channel", n_orig, n) + @adjust_nvar_warn("channel", n, 8 * nh) nc = 4 nd = 4 diff --git a/src/ADNLPProblems/clnlbeam.jl b/src/ADNLPProblems/clnlbeam.jl index bf25a8b68..cf4f63cfb 100644 --- a/src/ADNLPProblems/clnlbeam.jl +++ b/src/ADNLPProblems/clnlbeam.jl @@ -1,10 +1,8 @@ export clnlbeam function clnlbeam(args...; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - n_orig = n N = div(n - 3, 3) - n = 3 * N + 3 - @adjust_nvar_warn("clnlbeam", n_orig, n) + @adjust_nvar_warn("clnlbeam", n, 3 * N + 3) h = 1 // N alpha = 350 function f(y; N = N, h = h, alpha = alpha) diff --git a/src/ADNLPProblems/clplatea.jl b/src/ADNLPProblems/clplatea.jl index 7174be032..1a4be4f3b 100644 --- a/src/ADNLPProblems/clplatea.jl +++ b/src/ADNLPProblems/clplatea.jl @@ -6,10 +6,9 @@ function clplatea(; wght = -0.1, kwargs..., ) where {T} - n_orig = n p = max(floor(Int, sqrt(n)), 3) n = p * p - @adjust_nvar_warn("clplatea", n_orig, n) + @adjust_nvar_warn("clplatea", n, n) hp2 = (1 // 2) * p^2 function f(x; p = p, hp2 = hp2, wght = wght) return (eltype(x)(wght) * x[p + (p - 1) * p]) + diff --git a/src/ADNLPProblems/clplateb.jl b/src/ADNLPProblems/clplateb.jl index f1fbc0120..3ff2e0cf9 100644 --- a/src/ADNLPProblems/clplateb.jl +++ b/src/ADNLPProblems/clplateb.jl @@ -6,10 +6,9 @@ function clplateb(; wght = -0.1, kwargs..., ) where {T} - n_orig = n p = max(floor(Int, sqrt(n)), 3) n = p * p - @adjust_nvar_warn("clplateb", n_orig, n) + @adjust_nvar_warn("clplateb", n, n) hp2 = 1 // 2 * p^2 function f(x; p = p, hp2 = hp2, wght = wght) return sum(eltype(x)(wght) / (p - 1) * x[p + (j - 1) * p] for j = 1:p) + diff --git a/src/ADNLPProblems/clplatec.jl b/src/ADNLPProblems/clplatec.jl index 43d73eae4..dcbc6b148 100644 --- a/src/ADNLPProblems/clplatec.jl +++ b/src/ADNLPProblems/clplatec.jl @@ -8,10 +8,9 @@ function clplatec(; l = 0.01, kwargs..., ) where {T} - n_orig = n p = max(floor(Int, sqrt(n)), 3) n = p * p - @adjust_nvar_warn("clplatec", n_orig, n) + @adjust_nvar_warn("clplatec", n, n) hp2 = 1 // 2 * p^2 function f(x; p = p, hp2 = hp2, wght = wght, r = r, l = l) diff --git a/src/ADNLPProblems/dixmaan_efgh.jl b/src/ADNLPProblems/dixmaan_efgh.jl index b38fb16e9..5797f4bd1 100644 --- a/src/ADNLPProblems/dixmaan_efgh.jl +++ b/src/ADNLPProblems/dixmaan_efgh.jl @@ -9,10 +9,9 @@ function dixmaane(; δ = 125 // 1000, kwargs..., ) where {T} - n_orig = n m = 
max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaane", n_orig, n) + @adjust_nvar_warn("dixmaane", n, 3 * m) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum(i // n * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/dixmaan_ijkl.jl b/src/ADNLPProblems/dixmaan_ijkl.jl index d0b5d8476..a253bd498 100644 --- a/src/ADNLPProblems/dixmaan_ijkl.jl +++ b/src/ADNLPProblems/dixmaan_ijkl.jl @@ -9,10 +9,9 @@ function dixmaani(; δ = 125 // 1000, kwargs..., ) where {T} - n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaani", n_orig, n) + @adjust_nvar_warn("dixmaani", n, 3 * m) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum((i // n)^2 * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/dixmaan_mnop.jl b/src/ADNLPProblems/dixmaan_mnop.jl index 627353c22..aa1f70b7a 100644 --- a/src/ADNLPProblems/dixmaan_mnop.jl +++ b/src/ADNLPProblems/dixmaan_mnop.jl @@ -9,10 +9,9 @@ function dixmaanm(; δ = 125 // 1000, kwargs..., ) where {T} - n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaanm", n_orig, n) + @adjust_nvar_warn("dixmaanm", n, 3 * m) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum((i // n)^2 * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/elec.jl b/src/ADNLPProblems/elec.jl index 3aba48d8c..07e9eacc0 100644 --- a/src/ADNLPProblems/elec.jl +++ b/src/ADNLPProblems/elec.jl @@ -1,10 +1,8 @@ export elec function elec(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - n_orig = n - m = max(2, div(n_orig, 3)) - n = m - @adjust_nvar_warn("elec", n_orig, 3 * n) + n = max(2, div(n, 3)) + @adjust_nvar_warn("elec", n, 3 * n) # Define the objective function to minimize function f(x; n = n) return sum( diff --git a/src/ADNLPProblems/fminsrf2.jl b/src/ADNLPProblems/fminsrf2.jl index eae7709ba..0d7ab3283 100644 --- a/src/ADNLPProblems/fminsrf2.jl +++ b/src/ADNLPProblems/fminsrf2.jl @@ -1,11 +1,11 @@ export fminsrf2 function fminsrf2(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n n = max(4, n) + p = floor(Int, sqrt(n)) n = p * p - @adjust_nvar_warn("fminsrf2", n_orig, n) + @adjust_nvar_warn("fminsrf2", n, n) h00 = 1 slopej = 4 diff --git a/src/ADNLPProblems/hovercraft1d.jl b/src/ADNLPProblems/hovercraft1d.jl index 2dfab9bb9..09e2aa4a0 100644 --- a/src/ADNLPProblems/hovercraft1d.jl +++ b/src/ADNLPProblems/hovercraft1d.jl @@ -11,10 +11,8 @@ function hovercraft1d( type::Type{T} = Float64, kwargs..., ) where {T} - n_orig = n - N = div(n_orig, 3) - n = 3 * N - 1 - @adjust_nvar_warn("hovercraft1d", n_orig, n) + N = div(n, 3) + @adjust_nvar_warn("hovercraft1d", n, 3 * N - 1) function f(y; N = N) @views x, v, u = y[1:N], y[(N + 1):(2 * N)], y[(2 * N + 1):end] return 1 // 2 * sum(u .^ 2) @@ -75,10 +73,8 @@ function hovercraft1d( type::Type{T} = Float64, kwargs..., ) where {T} - n_orig = n - N = div(n_orig, 3) - n = 3 * N - 1 - @adjust_nvar_warn("hovercraft1d", n_orig, n) + N = div(n, 3) + @adjust_nvar_warn("hovercraft1d", n, 3 * N - 1) function F!(r, y; N = N) @views x, v, u = y[1:N], y[(N + 1):(2 * N)], y[(2 * N + 1):end] r .= u diff --git a/src/ADNLPProblems/marine.jl b/src/ADNLPProblems/marine.jl index 0ddcdefc0..77b4ac442 100644 --- a/src/ADNLPProblems/marine.jl +++ b/src/ADNLPProblems/marine.jl @@ -1,15 +1,13 @@ export marine function marine(; n::Int = default_nvar, nc::Int = 1, type::Type{T} = Float64, kwargs...) 
where {T} - n_orig = n nc = max(min(nc, 4), 1) # number of collocation points ne = 8 # number of differential equations nm = 21 # number of measurements n = max(n, 3 * ne * nc + ne + 2 * ne) nh = Int(round((n - 2 * ne + 1) / (3 * ne * nc + ne))) # number of partition intervals - n = 8 + 7 + nh * (8 + 3 * 8 * nc) - @adjust_nvar_warn("marine", n_orig, n) + @adjust_nvar_warn("marine", n, 8 + 7 + nh * (8 + 3 * 8 * nc)) # roots of k-th degree Legendre polynomial rho = if nc == 1 diff --git a/src/ADNLPProblems/powellsg.jl b/src/ADNLPProblems/powellsg.jl index e95cc1d55..27524433d 100644 --- a/src/ADNLPProblems/powellsg.jl +++ b/src/ADNLPProblems/powellsg.jl @@ -6,9 +6,7 @@ function powellsg(; use_nls::Bool = false, kwargs...) end function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n - n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("powellsg", n_orig, n) + @adjust_nvar_warn("powellsg", n, 4 * max(1, div(n, 4))) function f(x; n = length(x)) return sum( (x[j] + 10 * x[j + 1])^2 + @@ -25,9 +23,7 @@ function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function powellsg(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n - n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("powellsg", n_orig, n) + @adjust_nvar_warn("powellsg", n, 4 * max(1, div(n, 4))) function F!(r, x; n = length(x)) @inbounds for j = 1:4:n r[j] = x[j] + 10 * x[j + 1] @@ -41,5 +37,5 @@ function powellsg(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, k x0[4 * (collect(1:div(n, 4))) .- 3] .= 3 x0[4 * (collect(1:div(n, 4))) .- 2] .= -1 x0[4 * (collect(1:div(n, 4)))] .= 1 - return ADNLPModels.ADNLSModel!(F!, x0, n, name = "powellsg-nls"; kwargs...) + return ADNLPModels.ADNLSModel!(F!, x0, 4 * max(1, div(n, 4)), name = "powellsg-nls"; kwargs...) 
end diff --git a/src/ADNLPProblems/robotarm.jl b/src/ADNLPProblems/robotarm.jl index e1f0d23db..ebbf9176a 100644 --- a/src/ADNLPProblems/robotarm.jl +++ b/src/ADNLPProblems/robotarm.jl @@ -10,11 +10,9 @@ export robotarm # classification OOR2-AN-V-V function robotarm(; n::Int = default_nvar, L = 4.5, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n - N = max(2, div(n_orig, 9)) + N = max(2, div(n, 9)) n = N + 1 - nvars = 9 * n + 1 - @adjust_nvar_warn("robotarm", n_orig, nvars) + @adjust_nvar_warn("robotarm", n, 9 * n + 1) L = T(L) # x : vector of variables, of the form : [ρ(t=t1); ρ(t=t2); ... ρ(t=tf), θ(t=t1), ..., then ρ_dot, ..., then ρ_acc, .. ϕ_acc, tf] diff --git a/src/ADNLPProblems/spmsrtls.jl b/src/ADNLPProblems/spmsrtls.jl index 8cbc967cf..fd6cd911e 100644 --- a/src/ADNLPProblems/spmsrtls.jl +++ b/src/ADNLPProblems/spmsrtls.jl @@ -6,10 +6,9 @@ function spmsrtls(; use_nls::Bool = false, kwargs...) end function spmsrtls(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 - @adjust_nvar_warn("spmsrtls", n_orig, n) + @adjust_nvar_warn("spmsrtls", n, n) p = [sin(i^2) for i = 1:n] x0 = T[p[i] / 5 for i = 1:n] @@ -61,10 +60,9 @@ function spmsrtls(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function spmsrtls(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 - @adjust_nvar_warn("spmsrtls", n_orig, n) + @adjust_nvar_warn("spmsrtls", n, n) p = [sin(i^2) for i = 1:n] x0 = T[p[i] / 5 for i = 1:n] diff --git a/src/ADNLPProblems/srosenbr.jl b/src/ADNLPProblems/srosenbr.jl index 8c51d9e64..b45dd9958 100644 --- a/src/ADNLPProblems/srosenbr.jl +++ b/src/ADNLPProblems/srosenbr.jl @@ -1,9 +1,8 @@ export srosenbr function srosenbr(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - n_orig = n n = 2 * max(1, div(n, 2)) - @adjust_nvar_warn("srosenbr", n_orig, n) + @adjust_nvar_warn("srosenbr", n, n) function f(x; n = length(x)) return sum(100 * (x[2 * i] - x[2 * i - 1]^2)^2 + (x[2 * i - 1] - 1)^2 for i = 1:div(n, 2)) end diff --git a/src/ADNLPProblems/structural.jl b/src/ADNLPProblems/structural.jl index f39948fdf..811e74495 100644 --- a/src/ADNLPProblems/structural.jl +++ b/src/ADNLPProblems/structural.jl @@ -1,8 +1,7 @@ export structural function structural(args...; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n - n = max(n_orig, 100) + n = max(n, 100) sub2ind(shape, a, b) = LinearIndices(shape)[CartesianIndex.(a, b)] Nx = min(Int(round(n^(1 / 3))), 6) @@ -24,9 +23,7 @@ function structural(args...; n::Int = default_nvar, type::Type{T} = Float64, kwa M = Int(N * (N - 1) / 2) # number of edges - nvars = 2 * M - @adjust_nvar_warn("structural", n_orig, nvars) - + @adjust_nvar_warn("structural", n, 2 * M) # EDGES: columns are the indices of the nodes at either end edges = Array{Int}(zeros(M, 2)) diff --git a/src/ADNLPProblems/watson.jl b/src/ADNLPProblems/watson.jl index cf75edc30..7f8586d03 100644 --- a/src/ADNLPProblems/watson.jl +++ b/src/ADNLPProblems/watson.jl @@ -6,9 +6,8 @@ function watson(; use_nls::Bool = false, kwargs...) end function watson(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n n = min(max(n, 2), 31) - @adjust_nvar_warn("watson", n_orig, n) + @adjust_nvar_warn("watson", n, n) function f(x; n = n) Ti = eltype(x) return 1 // 2 * sum( @@ -33,9 +32,8 @@ function watson(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa end function watson(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} - n_orig = n n = min(max(n, 2), 31) - @adjust_nvar_warn("watson", n_orig, n) + @adjust_nvar_warn("watson", n, 31) function F!(r, x; n = n) Ti = eltype(x) for i = 1:29 diff --git a/src/ADNLPProblems/woods.jl b/src/ADNLPProblems/woods.jl index 575630098..f93ef7d01 100644 --- a/src/ADNLPProblems/woods.jl +++ b/src/ADNLPProblems/woods.jl @@ -1,9 +1,8 @@ export woods function woods(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n_orig = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("woods", n_orig, n) + @adjust_nvar_warn("woods", n, n) function f(x; n = length(x)) return sum( 100 * (x[4 * i - 2] - x[4 * i - 3]^2)^2 + diff --git a/src/PureJuMP/catmix.jl b/src/PureJuMP/catmix.jl index ca118b4d8..2b78d19a6 100644 --- a/src/PureJuMP/catmix.jl +++ b/src/PureJuMP/catmix.jl @@ -9,8 +9,7 @@ function catmix(args...; n::Int = default_nvar, kwargs...) n_orig = n ne = 2 nc = 3 - n = n_orig - @adjust_nvar_warn("catmix", n_orig, 23 * n + 2) + @adjust_nvar_warn("catmix", n_orig, 23 * n_orig + 2) tf = 1 h = tf / n # Final time diff --git a/src/PureJuMP/elec.jl b/src/PureJuMP/elec.jl index ce9a88e84..fa719433d 100644 --- a/src/PureJuMP/elec.jl +++ b/src/PureJuMP/elec.jl @@ -12,8 +12,7 @@ export elec function elec(args...; n::Int = default_nvar, kwargs...) n_orig = n - m = max(2, div(n_orig, 3)) - n = m + n = max(2, div(n_orig, 3)) @adjust_nvar_warn("elec", n_orig, 3 * n) nlp = Model() diff --git a/src/PureJuMP/gasoil.jl b/src/PureJuMP/gasoil.jl index 577fa93f8..e0f505c89 100644 --- a/src/PureJuMP/gasoil.jl +++ b/src/PureJuMP/gasoil.jl @@ -10,8 +10,7 @@ export gasoil function gasoil(; n::Int = default_nvar, kwargs...) 
n_orig = n nc = 4 # number of collocation points - n = n_orig - @adjust_nvar_warn("gasoil", n_orig, 26 * n + 3) + @adjust_nvar_warn("gasoil", n_orig, 26 * n_orig + 3) ne = 2 # number of differential equations np = 3 # number of ODE parameters nm = 21 # number of measurements diff --git a/src/PureJuMP/glider.jl b/src/PureJuMP/glider.jl index af1d0c3cf..6b8c2811d 100644 --- a/src/PureJuMP/glider.jl +++ b/src/PureJuMP/glider.jl @@ -10,8 +10,8 @@ export glider function glider(; n::Int = default_nvar, kwargs...) n_orig = n # Design parameters - n = n_orig - @adjust_nvar_warn("glider", n_orig, 5 * n + 6) + @adjust_nvar_warn("glider", n_orig, 5 * n_orig + 6) + n = 5 * n_orig + 6 x_0 = 0.0 y_0 = 1000.0 y_f = 900.0 diff --git a/src/PureJuMP/pinene.jl b/src/PureJuMP/pinene.jl index 314813113..ee3f9d909 100644 --- a/src/PureJuMP/pinene.jl +++ b/src/PureJuMP/pinene.jl @@ -26,8 +26,8 @@ function pinene(; n::Int = default_nvar, kwargs...) # times at which observations made tau = [1230.0, 3060.0, 4920.0, 7800.0, 10680.0, 15030.0, 22620.0, 36420.0] tf = tau[nm] # ODEs defined in [0,tf] - n = n_orig - @adjust_nvar_warn("pinene", n_orig, 50 * n + 5) + @adjust_nvar_warn("pinene", n_orig, 50 * n_orig + 5) + n = 50 * n_orig + 5 h = tf / n # uniform interval length t = [(i-1)*h for i = 1:(n + 1)] # partition From 933c3d2130cf8767c954f3258e715a107f3388f8 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sun, 10 May 2026 16:14:26 +0530 Subject: [PATCH 28/31] PureJuMP changes --- src/ADNLPProblems/powellsg.jl | 8 +++++--- src/PureJuMP/NZF1.jl | 3 +-- src/PureJuMP/bearing.jl | 5 +---- src/PureJuMP/broydn7d.jl | 3 +-- src/PureJuMP/catenary.jl | 3 +-- src/PureJuMP/catmix.jl | 3 +-- src/PureJuMP/chain.jl | 4 +--- src/PureJuMP/chainwoo.jl | 3 +-- src/PureJuMP/channel.jl | 4 +--- src/PureJuMP/clnlbeam.jl | 4 +--- src/PureJuMP/clplatea.jl | 3 +-- src/PureJuMP/clplateb.jl | 3 +-- src/PureJuMP/clplatec.jl | 3 +-- src/PureJuMP/dixmaan_efgh.jl | 3 +-- src/PureJuMP/dixmaan_ijkl.jl | 3 +-- 
src/PureJuMP/dixmaan_mnop.jl | 3 +-- src/PureJuMP/elec.jl | 5 ++--- src/PureJuMP/fminsrf2.jl | 3 +-- src/PureJuMP/gasoil.jl | 3 +-- src/PureJuMP/glider.jl | 4 +--- src/PureJuMP/hovercraft1d.jl | 6 ++---- src/PureJuMP/marine.jl | 6 ++---- src/PureJuMP/methanol.jl | 4 +--- src/PureJuMP/minsurf.jl | 5 ++--- src/PureJuMP/pinene.jl | 4 +--- src/PureJuMP/powellsg.jl | 3 +-- src/PureJuMP/robotarm.jl | 6 ++---- src/PureJuMP/spmsrtls.jl | 3 +-- src/PureJuMP/srosenbr.jl | 3 +-- src/PureJuMP/structural.jl | 7 ++----- src/PureJuMP/torsion.jl | 3 +-- src/PureJuMP/watson.jl | 3 +-- src/PureJuMP/woods.jl | 3 +-- 33 files changed, 43 insertions(+), 86 deletions(-) diff --git a/src/ADNLPProblems/powellsg.jl b/src/ADNLPProblems/powellsg.jl index 27524433d..50400b97f 100644 --- a/src/ADNLPProblems/powellsg.jl +++ b/src/ADNLPProblems/powellsg.jl @@ -6,7 +6,8 @@ function powellsg(; use_nls::Bool = false, kwargs...) end function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - @adjust_nvar_warn("powellsg", n, 4 * max(1, div(n, 4))) + n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("powellsg", n, n) function f(x; n = length(x)) return sum( (x[j] + 10 * x[j + 1])^2 + @@ -23,7 +24,8 @@ function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function powellsg(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - @adjust_nvar_warn("powellsg", n, 4 * max(1, div(n, 4))) + n = 4 * max(1, div(n, 4)) + @adjust_nvar_warn("powellsg", n, n) function F!(r, x; n = length(x)) @inbounds for j = 1:4:n r[j] = x[j] + 10 * x[j + 1] @@ -37,5 +39,5 @@ function powellsg(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, k x0[4 * (collect(1:div(n, 4))) .- 3] .= 3 x0[4 * (collect(1:div(n, 4))) .- 2] .= -1 x0[4 * (collect(1:div(n, 4)))] .= 1 - return ADNLPModels.ADNLSModel!(F!, x0, 4 * max(1, div(n, 4)), name = "powellsg-nls"; kwargs...) 
+ return ADNLPModels.ADNLSModel!(F!, x0, n, name = "powellsg-nls"; kwargs...) end diff --git a/src/PureJuMP/NZF1.jl b/src/PureJuMP/NZF1.jl index 886bfc857..3bc5ead82 100644 --- a/src/PureJuMP/NZF1.jl +++ b/src/PureJuMP/NZF1.jl @@ -7,10 +7,9 @@ export NZF1 function NZF1(args...; n::Int = default_nvar, kwargs...) - n_orig = n nbis = max(2, div(n, 13)) n = 13 * nbis - @adjust_nvar_warn("NZF1", n_orig, n) + @adjust_nvar_warn("NZF1", n, n) l = div(n, 13) diff --git a/src/PureJuMP/bearing.jl b/src/PureJuMP/bearing.jl index 442de6250..64b0214c5 100644 --- a/src/PureJuMP/bearing.jl +++ b/src/PureJuMP/bearing.jl @@ -27,12 +27,9 @@ function bearing( # nx > 0 # grid points in 1st direction # ny > 0 # grid points in 2nd direction - - n_orig = n nx = max(1, nx) ny = max(1, ny) - n = (nx + 2) * (ny + 2) - @adjust_nvar_warn("bearing", n_orig, n) + @adjust_nvar_warn("bearing", n, (nx + 2) * (ny + 2)) b = 10 # grid is (0,2*pi)x(0,2*b) e = 0.1 # eccentricity diff --git a/src/PureJuMP/broydn7d.jl b/src/PureJuMP/broydn7d.jl index eb24cfc2f..24bc78a73 100644 --- a/src/PureJuMP/broydn7d.jl +++ b/src/PureJuMP/broydn7d.jl @@ -46,10 +46,9 @@ export broydn7d "Broyden 7-diagonal model in size `n`" function broydn7d(args...; n::Int = default_nvar, p::Float64 = 7 / 3, kwargs...) - n_orig = n n2 = max(1, div(n, 2)) n = 2 * n2 - @adjust_nvar_warn("broydn7d", n_orig, n) + @adjust_nvar_warn("broydn7d", n, n) nlp = Model() diff --git a/src/PureJuMP/catenary.jl b/src/PureJuMP/catenary.jl index 6be4300b3..f7a9c035c 100644 --- a/src/PureJuMP/catenary.jl +++ b/src/PureJuMP/catenary.jl @@ -17,10 +17,9 @@ export catenary function catenary(args...; n::Int = default_nvar, Bl = 1.0, FRACT = 0.6, kwargs...) 
- n_orig = n n = 3 * max(1, div(n, 3)) n = max(n, 6) - @adjust_nvar_warn("catenary", n_orig, n) + @adjust_nvar_warn("catenary", n, n) ## Model Parameters diff --git a/src/PureJuMP/catmix.jl b/src/PureJuMP/catmix.jl index 2b78d19a6..4d010bb18 100644 --- a/src/PureJuMP/catmix.jl +++ b/src/PureJuMP/catmix.jl @@ -6,10 +6,9 @@ export catmix function catmix(args...; n::Int = default_nvar, kwargs...) - n_orig = n ne = 2 nc = 3 - @adjust_nvar_warn("catmix", n_orig, 23 * n_orig + 2) + @adjust_nvar_warn("catmix", n, 23 * n + 2) tf = 1 h = tf / n # Final time diff --git a/src/PureJuMP/chain.jl b/src/PureJuMP/chain.jl index dddc3128d..c3b2d23a5 100644 --- a/src/PureJuMP/chain.jl +++ b/src/PureJuMP/chain.jl @@ -13,10 +13,8 @@ export chain function chain(args...; n::Int = default_nvar, kwargs...) - n_orig = n nh = max(2, div(n - 4, 4)) - n = 4 * nh + 4 - @adjust_nvar_warn("chain", n_orig, n) + @adjust_nvar_warn("chain", n, 4 * nh + 4) L = 4 a = 1 diff --git a/src/PureJuMP/chainwoo.jl b/src/PureJuMP/chainwoo.jl index 7d0112043..ff90d3701 100644 --- a/src/PureJuMP/chainwoo.jl +++ b/src/PureJuMP/chainwoo.jl @@ -35,9 +35,8 @@ export chainwoo "The chained Woods function in size `n`, a variant on the Woods function" function chainwoo(args...; n::Int = default_nvar, kwargs...) - n_orig = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("chainwoo", n_orig, n) + @adjust_nvar_warn("chainwoo", n, n) nlp = Model() diff --git a/src/PureJuMP/channel.jl b/src/PureJuMP/channel.jl index 8054a121d..f21beeeee 100644 --- a/src/PureJuMP/channel.jl +++ b/src/PureJuMP/channel.jl @@ -12,10 +12,8 @@ export channel function channel(args...; n::Int = default_nvar, kwargs...) 
- n_orig = n nh = max(2, div(n, 8)) - n = 8 * nh - @adjust_nvar_warn("channel", n_orig, n) + @adjust_nvar_warn("channel", n, 8 * nh) nc = 4 nd = 4 diff --git a/src/PureJuMP/clnlbeam.jl b/src/PureJuMP/clnlbeam.jl index 6cafb82cb..d0036e288 100644 --- a/src/PureJuMP/clnlbeam.jl +++ b/src/PureJuMP/clnlbeam.jl @@ -14,10 +14,8 @@ export clnlbeam "The clnlbeam problem in size `n`" function clnlbeam(args...; n::Int = default_nvar, kwargs...) - n_orig = n N = div(n - 3, 3) - n = 3 * N + 3 - @adjust_nvar_warn("clnlbeam", n_orig, n) + @adjust_nvar_warn("clnlbeam", n, 3 * N + 3) h = 1 / N alpha = 350 model = Model() diff --git a/src/PureJuMP/clplatea.jl b/src/PureJuMP/clplatea.jl index a12947dce..6bd6b9411 100644 --- a/src/PureJuMP/clplatea.jl +++ b/src/PureJuMP/clplatea.jl @@ -26,10 +26,9 @@ export clplatea "The clamped plate problem (Strang, Nocedal, Dax)." function clplatea(args...; n::Int = default_nvar, wght::Float64 = -0.1, kwargs...) - n_orig = n p = floor(Int, sqrt(n)) n = p * p - @adjust_nvar_warn("clplatea", n_orig, n) + @adjust_nvar_warn("clplatea", n, n) nlp = Model() diff --git a/src/PureJuMP/clplateb.jl b/src/PureJuMP/clplateb.jl index ec6315f17..885048f9c 100644 --- a/src/PureJuMP/clplateb.jl +++ b/src/PureJuMP/clplateb.jl @@ -27,10 +27,9 @@ export clplateb "The clamped plate problem (Strang, Nocedal, Dax)." function clplateb(args...; n::Int = default_nvar, wght::Float64 = -0.1, kwargs...) 
- n_orig = n p = floor(Int, sqrt(n)) n = p * p - @adjust_nvar_warn("clplateb", n_orig, n) + @adjust_nvar_warn("clplateb", n, n) nlp = Model() diff --git a/src/PureJuMP/clplatec.jl b/src/PureJuMP/clplatec.jl index e85f9686d..38c963e20 100644 --- a/src/PureJuMP/clplatec.jl +++ b/src/PureJuMP/clplatec.jl @@ -33,10 +33,9 @@ function clplatec( l::Float64 = 0.01, kwargs..., ) - n_orig = n p = floor(Int, sqrt(n)) n = p * p - @adjust_nvar_warn("clplatec", n_orig, n) + @adjust_nvar_warn("clplatec", n, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_efgh.jl b/src/PureJuMP/dixmaan_efgh.jl index f25edac80..6f7ce4795 100644 --- a/src/PureJuMP/dixmaan_efgh.jl +++ b/src/PureJuMP/dixmaan_efgh.jl @@ -33,10 +33,9 @@ function dixmaane( δ::Float64 = 0.125, kwargs..., ) - n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaane", n_orig, n) + @adjust_nvar_warn("dixmaane", n, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_ijkl.jl b/src/PureJuMP/dixmaan_ijkl.jl index e4f22ed05..0152a3a49 100644 --- a/src/PureJuMP/dixmaan_ijkl.jl +++ b/src/PureJuMP/dixmaan_ijkl.jl @@ -33,10 +33,9 @@ function dixmaani( δ::Float64 = 0.125, kwargs..., ) - n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaani", n_orig, n) + @adjust_nvar_warn("dixmaani", n, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_mnop.jl b/src/PureJuMP/dixmaan_mnop.jl index 0c676cc59..5c4e79557 100644 --- a/src/PureJuMP/dixmaan_mnop.jl +++ b/src/PureJuMP/dixmaan_mnop.jl @@ -31,10 +31,9 @@ function dixmaanm( δ::Float64 = 0.125, kwargs..., ) - n_orig = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaanm", n_orig, n) + @adjust_nvar_warn("dixmaanm", n, n) nlp = Model() diff --git a/src/PureJuMP/elec.jl b/src/PureJuMP/elec.jl index fa719433d..fec9c01ed 100644 --- a/src/PureJuMP/elec.jl +++ b/src/PureJuMP/elec.jl @@ -11,9 +11,8 @@ export elec function elec(args...; n::Int = default_nvar, kwargs...) 
- n_orig = n - n = max(2, div(n_orig, 3)) - @adjust_nvar_warn("elec", n_orig, 3 * n) + n = max(2, div(n, 3)) + @adjust_nvar_warn("elec", n, 3 * n) nlp = Model() diff --git a/src/PureJuMP/fminsrf2.jl b/src/PureJuMP/fminsrf2.jl index d73c4c630..e2b485a87 100644 --- a/src/PureJuMP/fminsrf2.jl +++ b/src/PureJuMP/fminsrf2.jl @@ -21,12 +21,11 @@ export fminsrf2 function fminsrf2(args...; n::Int = default_nvar, kwargs...) - n_orig = n n = max(4, n) p = floor(Int, sqrt(n)) n = p * p - @adjust_nvar_warn("fminsrf2", n_orig, n) + @adjust_nvar_warn("fminsrf2", n, n) h00 = 1.0 slopej = 4.0 diff --git a/src/PureJuMP/gasoil.jl b/src/PureJuMP/gasoil.jl index e0f505c89..1213e5d2b 100644 --- a/src/PureJuMP/gasoil.jl +++ b/src/PureJuMP/gasoil.jl @@ -8,9 +8,8 @@ export gasoil function gasoil(; n::Int = default_nvar, kwargs...) - n_orig = n nc = 4 # number of collocation points - @adjust_nvar_warn("gasoil", n_orig, 26 * n_orig + 3) + @adjust_nvar_warn("gasoil", n, 26 * n + 3) ne = 2 # number of differential equations np = 3 # number of ODE parameters nm = 21 # number of measurements diff --git a/src/PureJuMP/glider.jl b/src/PureJuMP/glider.jl index 6b8c2811d..66ca5c79e 100644 --- a/src/PureJuMP/glider.jl +++ b/src/PureJuMP/glider.jl @@ -8,10 +8,8 @@ export glider function glider(; n::Int = default_nvar, kwargs...) - n_orig = n # Design parameters - @adjust_nvar_warn("glider", n_orig, 5 * n_orig + 6) - n = 5 * n_orig + 6 + @adjust_nvar_warn("glider", n, 5 * n + 6) x_0 = 0.0 y_0 = 1000.0 y_f = 900.0 diff --git a/src/PureJuMP/hovercraft1d.jl b/src/PureJuMP/hovercraft1d.jl index f98eacb56..ab683f844 100644 --- a/src/PureJuMP/hovercraft1d.jl +++ b/src/PureJuMP/hovercraft1d.jl @@ -6,12 +6,10 @@ export hovercraft1d function hovercraft1d(args...; n::Int = default_nvar, kwargs...) 
- n_orig = n nlp = Model() - T = div(n_orig, 3) # length of time horizon - n = 3 * T - 1 - @adjust_nvar_warn("hovercraft1d", n_orig, n) + T = div(n, 3) # length of time horizon + @adjust_nvar_warn("hovercraft1d", n, 3 * T - 1) @variable(nlp, x[1:T]) # resulting position @variable(nlp, v[1:T]) # resulting velocity @variable(nlp, u[1:(T - 1)]) # thruster input diff --git a/src/PureJuMP/marine.jl b/src/PureJuMP/marine.jl index 462d775a7..d90360d7d 100644 --- a/src/PureJuMP/marine.jl +++ b/src/PureJuMP/marine.jl @@ -18,17 +18,15 @@ export marine function marine(args...; n::Int = default_nvar, nc::Int = 1, kwargs...) - n_orig = n nlp = Model() nc = max(min(nc, 4), 1) # number of collocation points ne = 8 # number of differential equations nm = 21 # number of measurements - n = max(n_orig, 3 * ne * nc + ne + 2 * ne) + n = max(n, 3 * ne * nc + ne + 2 * ne) nh = Int(round((n - 2 * ne + 1) / (3 * ne * nc + ne))) # number of partition intervals - n = 8 + 7 + nh * (8 + 3 * 8 * nc) - @adjust_nvar_warn("marine", n_orig, n) + @adjust_nvar_warn("marine", n, 8 + 7 + nh * (8 + 3 * 8 * nc)) # roots of k-th degree Legendre polynomial rho = if nc == 1 diff --git a/src/PureJuMP/methanol.jl b/src/PureJuMP/methanol.jl index 2e70e2309..bbf0f671a 100644 --- a/src/PureJuMP/methanol.jl +++ b/src/PureJuMP/methanol.jl @@ -8,7 +8,6 @@ export methanol function methanol(args...; n::Int = default_nvar, kwargs...) - n_orig = n ne = 3 np = 5 nc = 3 @@ -36,8 +35,7 @@ function methanol(args...; n::Int = default_nvar, kwargs...) 
1.122, ] tf = tau[nm] # ODEs defined in [0,tf] - n = n_orig - @adjust_nvar_warn("methanol", n_orig, 30 * n + 5) + @adjust_nvar_warn("methanol", n, 30 * n + 5) h = tf / n # uniform interval length t = [(i-1)*h for i = 1:(n + 1)] # partition fact = [factorial(k) for k = 0:nc] diff --git a/src/PureJuMP/minsurf.jl b/src/PureJuMP/minsurf.jl index f755d60bf..bf0580044 100644 --- a/src/PureJuMP/minsurf.jl +++ b/src/PureJuMP/minsurf.jl @@ -12,13 +12,12 @@ export minsurf function minsurf(args...; n = default_nvar, kwargs...) - n_orig = n # number of variables is (nx + 2) x (ny + 2) if !((:nx in keys(kwargs)) & (:ny in keys(kwargs))) - nx, ny = Int(round(sqrt(max(1, n_orig - 2)))), Int(round(sqrt(max(1, n_orig - 2)))) + nx, ny = Int(round(sqrt(max(1, n - 2)))), Int(round(sqrt(max(1, n - 2)))) end n = (nx + 2) * (ny + 2) - @adjust_nvar_warn("minsurf", n_orig, n) + @adjust_nvar_warn("minsurf", n, n) x_mesh = LinRange(0, 1, nx + 2) # coordinates of the mesh points x v0 = zeros(nx + 2, ny + 2) # Surface matrix initialization diff --git a/src/PureJuMP/pinene.jl b/src/PureJuMP/pinene.jl index ee3f9d909..49f5a3137 100644 --- a/src/PureJuMP/pinene.jl +++ b/src/PureJuMP/pinene.jl @@ -13,7 +13,6 @@ export pinene function pinene(; n::Int = default_nvar, kwargs...) - n_orig = n nc = 3 # number of collocation points ne = 5 # number of differential equations np = 5 # number of ODE parameters @@ -26,8 +25,7 @@ function pinene(; n::Int = default_nvar, kwargs...) 
# times at which observations made tau = [1230.0, 3060.0, 4920.0, 7800.0, 10680.0, 15030.0, 22620.0, 36420.0] tf = tau[nm] # ODEs defined in [0,tf] - @adjust_nvar_warn("pinene", n_orig, 50 * n_orig + 5) - n = 50 * n_orig + 5 + @adjust_nvar_warn("pinene", n, 50 * n + 5) h = tf / n # uniform interval length t = [(i-1)*h for i = 1:(n + 1)] # partition diff --git a/src/PureJuMP/powellsg.jl b/src/PureJuMP/powellsg.jl index e7ecdd2a5..571a2b6c1 100644 --- a/src/PureJuMP/powellsg.jl +++ b/src/PureJuMP/powellsg.jl @@ -37,9 +37,8 @@ export powellsg "The extended Powell singular problem in size 'n' " function powellsg(args...; n::Int = default_nvar, kwargs...) - n_orig = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("powellsg", n_orig, n) + @adjust_nvar_warn("powellsg", n, n) x0 = zeros(n) x0[4 * (collect(1:div(n, 4))) .- 3] .= 3.0 diff --git a/src/PureJuMP/robotarm.jl b/src/PureJuMP/robotarm.jl index 950187b4a..545f8c4f2 100644 --- a/src/PureJuMP/robotarm.jl +++ b/src/PureJuMP/robotarm.jl @@ -14,11 +14,9 @@ export robotarm function robotarm(; n::Int = default_nvar, L = 4.5, kwargs...) - n_orig = n - N = max(2, div(n_orig, 9)) + N = max(2, div(n, 9)) n = N + 1 - nvars = 9 * n + 1 - @adjust_nvar_warn("robotarm", n_orig, nvars) + @adjust_nvar_warn("robotarm", n, 9 * n + 1) nlp = Model() diff --git a/src/PureJuMP/spmsrtls.jl b/src/PureJuMP/spmsrtls.jl index 4e08dd863..a1fcb3c67 100644 --- a/src/PureJuMP/spmsrtls.jl +++ b/src/PureJuMP/spmsrtls.jl @@ -21,10 +21,9 @@ export spmsrtls function spmsrtls(args...; n::Int = default_nvar, kwargs...) 
- n_orig = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 - @adjust_nvar_warn("spmsrtls", n_orig, n) + @adjust_nvar_warn("spmsrtls", n, n) p = [sin(i^2) for i = 1:n] x0 = [p[i] / 5 for i = 1:n] diff --git a/src/PureJuMP/srosenbr.jl b/src/PureJuMP/srosenbr.jl index 1381f38c0..db1556147 100644 --- a/src/PureJuMP/srosenbr.jl +++ b/src/PureJuMP/srosenbr.jl @@ -21,9 +21,8 @@ export srosenbr "The separable extension of Rosenbrock's function 'n' " function srosenbr(args...; n::Int = default_nvar, kwargs...) - n_orig = n n = 2 * max(1, div(n, 2)) - @adjust_nvar_warn("srosenbr", n_orig, n) + @adjust_nvar_warn("srosenbr", n, n) x0 = ones(n) x0[2 * (collect(1:div(n, 2))) .- 1] .= -1.2 diff --git a/src/PureJuMP/structural.jl b/src/PureJuMP/structural.jl index 4bfbb8184..667fefd65 100644 --- a/src/PureJuMP/structural.jl +++ b/src/PureJuMP/structural.jl @@ -6,8 +6,7 @@ export structural function structural(args...; n::Int = default_nvar, kwargs...) - n_orig = n - n = max(n_orig, 100) + n = max(n, 100) sub2ind(shape, a, b) = LinearIndices(shape)[CartesianIndex.(a, b)] Nx = min(Int(round(n^(1 / 3))), 6) @@ -30,9 +29,7 @@ function structural(args...; n::Int = default_nvar, kwargs...) M = Int(N * (N - 1) / 2) # number of edges - nvars = 2 * M - @adjust_nvar_warn("structural", n_orig, nvars) - + @adjust_nvar_warn("structural", n, 2 * M) # EDGES: columns are the indices of the nodes at either end edges = Array{Int}(zeros(M, 2)) diff --git a/src/PureJuMP/torsion.jl b/src/PureJuMP/torsion.jl index 19d175ee3..6ea03cec2 100644 --- a/src/PureJuMP/torsion.jl +++ b/src/PureJuMP/torsion.jl @@ -7,11 +7,10 @@ export torsion function torsion(args...; n = default_nvar, kwargs...) 
# number of variables is (nx + 1) x (ny + 1) - n_orig = n if !((:nx in keys(kwargs)) & (:ny in keys(kwargs))) nx, ny = Int(round(sqrt(max(1, n - 2)))), Int(round(sqrt(max(1, n - 2)))) end - @adjust_nvar_warn("torsion", n_orig, (nx + 2) * (ny + 2)) + @adjust_nvar_warn("torsion", n, (nx + 2) * (ny + 2)) c = 5.0 hx = 1.0 / (nx + 1.0) # grid spacing hy = 1.0 / (ny + 1.0) # grid spacing diff --git a/src/PureJuMP/watson.jl b/src/PureJuMP/watson.jl index e9f6a582c..7ab786ba5 100644 --- a/src/PureJuMP/watson.jl +++ b/src/PureJuMP/watson.jl @@ -17,9 +17,8 @@ export watson function watson(args...; n::Int = default_nvar, kwargs...) - n_orig = n n = min(max(n, 2), 31) - @adjust_nvar_warn("watson", n_orig, n) + @adjust_nvar_warn("watson", n, n) m = 31 nlp = Model() diff --git a/src/PureJuMP/woods.jl b/src/PureJuMP/woods.jl index c9f5b8938..1cec7a722 100644 --- a/src/PureJuMP/woods.jl +++ b/src/PureJuMP/woods.jl @@ -39,9 +39,8 @@ export woods "The extended Woods problem `n` " function woods(args...; n::Int = default_nvar, kwargs...) 
- n_orig = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("woods", n_orig, n) + @adjust_nvar_warn("woods", n, n) nlp = Model() From 29748af663ba4f9fc9c3b83a118dde09bc4c7e48 Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sun, 10 May 2026 16:58:51 +0530 Subject: [PATCH 29/31] --- src/ADNLPProblems/NZF1.jl | 6 ++++-- src/ADNLPProblems/broydn7d.jl | 3 ++- src/ADNLPProblems/catenary.jl | 3 ++- src/ADNLPProblems/chainwoo.jl | 6 ++++-- src/ADNLPProblems/clplatea.jl | 3 ++- src/ADNLPProblems/clplateb.jl | 3 ++- src/ADNLPProblems/clplatec.jl | 3 ++- src/ADNLPProblems/dixmaan_efgh.jl | 3 ++- src/ADNLPProblems/dixmaan_ijkl.jl | 3 ++- src/ADNLPProblems/dixmaan_mnop.jl | 3 ++- src/ADNLPProblems/elec.jl | 3 ++- src/ADNLPProblems/fminsrf2.jl | 3 ++- src/ADNLPProblems/marine.jl | 3 ++- src/ADNLPProblems/powellsg.jl | 6 ++++-- src/ADNLPProblems/robotarm.jl | 3 ++- src/ADNLPProblems/spmsrtls.jl | 6 ++++-- src/ADNLPProblems/srosenbr.jl | 3 ++- src/ADNLPProblems/structural.jl | 3 ++- src/ADNLPProblems/watson.jl | 6 ++++-- src/ADNLPProblems/woods.jl | 3 ++- src/PureJuMP/NZF1.jl | 3 ++- src/PureJuMP/broydn7d.jl | 3 ++- src/PureJuMP/catenary.jl | 3 ++- src/PureJuMP/chainwoo.jl | 3 ++- src/PureJuMP/clplatea.jl | 3 ++- src/PureJuMP/clplateb.jl | 3 ++- src/PureJuMP/clplatec.jl | 3 ++- src/PureJuMP/dixmaan_efgh.jl | 3 ++- src/PureJuMP/dixmaan_ijkl.jl | 3 ++- src/PureJuMP/dixmaan_mnop.jl | 3 ++- src/PureJuMP/elec.jl | 3 ++- src/PureJuMP/fminsrf2.jl | 3 ++- src/PureJuMP/marine.jl | 3 ++- src/PureJuMP/minsurf.jl | 3 +-- src/PureJuMP/powellsg.jl | 3 ++- src/PureJuMP/robotarm.jl | 3 ++- src/PureJuMP/spmsrtls.jl | 3 ++- src/PureJuMP/srosenbr.jl | 3 ++- src/PureJuMP/steering.jl | 2 +- src/PureJuMP/structural.jl | 3 ++- src/PureJuMP/watson.jl | 3 ++- src/PureJuMP/woods.jl | 3 ++- 42 files changed, 92 insertions(+), 48 deletions(-) diff --git a/src/ADNLPProblems/NZF1.jl b/src/ADNLPProblems/NZF1.jl index a7b3fab60..1dcf962d7 100644 --- a/src/ADNLPProblems/NZF1.jl +++ b/src/ADNLPProblems/NZF1.jl @@ 
-6,9 +6,10 @@ function NZF1(; use_nls::Bool = false, kwargs...) end function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n nbis = max(2, div(n, 13)) n = 13 * nbis - @adjust_nvar_warn("NZF1", n, n) + @adjust_nvar_warn("NZF1", n_org, n) l = div(n, 13) function f(x; l = l) return sum( @@ -30,9 +31,10 @@ function NZF1(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwarg end function NZF1(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n nbis = max(2, div(n, 13)) n = 13 * nbis - @adjust_nvar_warn("NZF1", n, n) + @adjust_nvar_warn("NZF1", n_org, n) l = div(n, 13) function F!(r, x; l = l) for i = 1:l diff --git a/src/ADNLPProblems/broydn7d.jl b/src/ADNLPProblems/broydn7d.jl index 2c2f1ef2e..8a2f74e80 100644 --- a/src/ADNLPProblems/broydn7d.jl +++ b/src/ADNLPProblems/broydn7d.jl @@ -1,9 +1,10 @@ export broydn7d function broydn7d(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n n2 = max(1, div(n, 2)) n = 2 * n2 - @adjust_nvar_warn("broydn7d", n, n) + @adjust_nvar_warn("broydn7d", n_org, n) function f(x; n = length(x), n2 = n2) p = 7 // 3 return abs(1 - 2 * x[2] + (3 - x[1] / 2) * x[1])^p + diff --git a/src/ADNLPProblems/catenary.jl b/src/ADNLPProblems/catenary.jl index c56207fd8..bd96b25fb 100644 --- a/src/ADNLPProblems/catenary.jl +++ b/src/ADNLPProblems/catenary.jl @@ -8,9 +8,10 @@ function catenary( FRACT = 0.6, kwargs..., ) where {T} + n_org = n n = 3 * max(1, div(n, 3)) n = max(n, 6) - @adjust_nvar_warn("catenary", n, n) + @adjust_nvar_warn("catenary", n_org, n) ## Model Parameters N = div(n, 3) - 2 diff --git a/src/ADNLPProblems/chainwoo.jl b/src/ADNLPProblems/chainwoo.jl index db17942e3..63c8e5345 100644 --- a/src/ADNLPProblems/chainwoo.jl +++ b/src/ADNLPProblems/chainwoo.jl @@ -6,8 +6,9 @@ function chainwoo(; use_nls::Bool = false, kwargs...) 
end function chainwoo(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("chainwoo", n, n) + @adjust_nvar_warn("chainwoo", n_org, n) function f(x; n = length(x)) return 1 + sum( 100 * (x[2 * i] - x[2 * i - 1]^2)^2 + @@ -23,8 +24,9 @@ function chainwoo(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function chainwoo(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("chainwoo", n, n) + @adjust_nvar_warn("chainwoo", n_org, n) function F!(r, x; n = length(x)) nb = div(n, 2) - 1 r[1] = 1 diff --git a/src/ADNLPProblems/clplatea.jl b/src/ADNLPProblems/clplatea.jl index 1a4be4f3b..828eaba0a 100644 --- a/src/ADNLPProblems/clplatea.jl +++ b/src/ADNLPProblems/clplatea.jl @@ -6,9 +6,10 @@ function clplatea(; wght = -0.1, kwargs..., ) where {T} + n_org = n p = max(floor(Int, sqrt(n)), 3) n = p * p - @adjust_nvar_warn("clplatea", n, n) + @adjust_nvar_warn("clplatea", n_org, n) hp2 = (1 // 2) * p^2 function f(x; p = p, hp2 = hp2, wght = wght) return (eltype(x)(wght) * x[p + (p - 1) * p]) + diff --git a/src/ADNLPProblems/clplateb.jl b/src/ADNLPProblems/clplateb.jl index 3ff2e0cf9..5f2d449cb 100644 --- a/src/ADNLPProblems/clplateb.jl +++ b/src/ADNLPProblems/clplateb.jl @@ -6,9 +6,10 @@ function clplateb(; wght = -0.1, kwargs..., ) where {T} + n_org = n p = max(floor(Int, sqrt(n)), 3) n = p * p - @adjust_nvar_warn("clplateb", n, n) + @adjust_nvar_warn("clplateb", n_org, n) hp2 = 1 // 2 * p^2 function f(x; p = p, hp2 = hp2, wght = wght) return sum(eltype(x)(wght) / (p - 1) * x[p + (j - 1) * p] for j = 1:p) + diff --git a/src/ADNLPProblems/clplatec.jl b/src/ADNLPProblems/clplatec.jl index dcbc6b148..03d71267f 100644 --- a/src/ADNLPProblems/clplatec.jl +++ b/src/ADNLPProblems/clplatec.jl @@ -8,9 +8,10 @@ function clplatec(; l = 0.01, kwargs..., ) where {T} + n_org = n p = max(floor(Int, 
sqrt(n)), 3) n = p * p - @adjust_nvar_warn("clplatec", n, n) + @adjust_nvar_warn("clplatec", n_org, n) hp2 = 1 // 2 * p^2 function f(x; p = p, hp2 = hp2, wght = wght, r = r, l = l) diff --git a/src/ADNLPProblems/dixmaan_efgh.jl b/src/ADNLPProblems/dixmaan_efgh.jl index 5797f4bd1..dddd4f371 100644 --- a/src/ADNLPProblems/dixmaan_efgh.jl +++ b/src/ADNLPProblems/dixmaan_efgh.jl @@ -9,9 +9,10 @@ function dixmaane(; δ = 125 // 1000, kwargs..., ) where {T} + n_org = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaane", n, 3 * m) + @adjust_nvar_warn("dixmaane", n_org, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum(i // n * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/dixmaan_ijkl.jl b/src/ADNLPProblems/dixmaan_ijkl.jl index a253bd498..c3985b14e 100644 --- a/src/ADNLPProblems/dixmaan_ijkl.jl +++ b/src/ADNLPProblems/dixmaan_ijkl.jl @@ -9,9 +9,10 @@ function dixmaani(; δ = 125 // 1000, kwargs..., ) where {T} + n_org = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaani", n, 3 * m) + @adjust_nvar_warn("dixmaani", n_org, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum((i // n)^2 * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/dixmaan_mnop.jl b/src/ADNLPProblems/dixmaan_mnop.jl index aa1f70b7a..ea5d0a247 100644 --- a/src/ADNLPProblems/dixmaan_mnop.jl +++ b/src/ADNLPProblems/dixmaan_mnop.jl @@ -9,9 +9,10 @@ function dixmaanm(; δ = 125 // 1000, kwargs..., ) where {T} + n_org = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaanm", n, 3 * m) + @adjust_nvar_warn("dixmaanm", n_org, n) function f(x; n = length(x), α = α, β = β, γ = γ, δ = δ) return 1 + sum((i // n)^2 * α * x[i]^2 for i = 1:n) + diff --git a/src/ADNLPProblems/elec.jl b/src/ADNLPProblems/elec.jl index 07e9eacc0..92269bbd8 100644 --- a/src/ADNLPProblems/elec.jl +++ b/src/ADNLPProblems/elec.jl @@ -1,8 +1,9 @@ export elec function elec(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} + n_orig = n n = max(2, div(n, 3)) - @adjust_nvar_warn("elec", n, 3 * n) + @adjust_nvar_warn("elec", n_orig, 3 * n) # Define the objective function to minimize function f(x; n = n) return sum( diff --git a/src/ADNLPProblems/fminsrf2.jl b/src/ADNLPProblems/fminsrf2.jl index 0d7ab3283..e33eeb3d2 100644 --- a/src/ADNLPProblems/fminsrf2.jl +++ b/src/ADNLPProblems/fminsrf2.jl @@ -2,10 +2,11 @@ export fminsrf2 function fminsrf2(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n n = max(4, n) p = floor(Int, sqrt(n)) n = p * p - @adjust_nvar_warn("fminsrf2", n, n) + @adjust_nvar_warn("fminsrf2", n_org, n) h00 = 1 slopej = 4 diff --git a/src/ADNLPProblems/marine.jl b/src/ADNLPProblems/marine.jl index 77b4ac442..5b01f2b1e 100644 --- a/src/ADNLPProblems/marine.jl +++ b/src/ADNLPProblems/marine.jl @@ -1,13 +1,14 @@ export marine function marine(; n::Int = default_nvar, nc::Int = 1, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n nc = max(min(nc, 4), 1) # number of collocation points ne = 8 # number of differential equations nm = 21 # number of measurements n = max(n, 3 * ne * nc + ne + 2 * ne) nh = Int(round((n - 2 * ne + 1) / (3 * ne * nc + ne))) # number of partition intervals - @adjust_nvar_warn("marine", n, 8 + 7 + nh * (8 + 3 * 8 * nc)) + @adjust_nvar_warn("marine", n_orig, 8 + 7 + nh * (8 + 3 * 8 * nc)) # roots of k-th degree Legendre polynomial rho = if nc == 1 diff --git a/src/ADNLPProblems/powellsg.jl b/src/ADNLPProblems/powellsg.jl index 50400b97f..0c6d99280 100644 --- a/src/ADNLPProblems/powellsg.jl +++ b/src/ADNLPProblems/powellsg.jl @@ -6,8 +6,9 @@ function powellsg(; use_nls::Bool = false, kwargs...) end function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...)
where {T} + n_org = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("powellsg", n, n) + @adjust_nvar_warn("powellsg", n_org, n) function f(x; n = length(x)) return sum( (x[j] + 10 * x[j + 1])^2 + @@ -24,8 +25,9 @@ function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function powellsg(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("powellsg", n, n) + @adjust_nvar_warn("powellsg", n_org, n) function F!(r, x; n = length(x)) @inbounds for j = 1:4:n r[j] = x[j] + 10 * x[j + 1] diff --git a/src/ADNLPProblems/robotarm.jl b/src/ADNLPProblems/robotarm.jl index ebbf9176a..cd41e9cce 100644 --- a/src/ADNLPProblems/robotarm.jl +++ b/src/ADNLPProblems/robotarm.jl @@ -10,9 +10,10 @@ export robotarm # classification OOR2-AN-V-V function robotarm(; n::Int = default_nvar, L = 4.5, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n N = max(2, div(n, 9)) n = N + 1 - @adjust_nvar_warn("robotarm", n, 9 * n + 1) + @adjust_nvar_warn("robotarm", n_orig, 9 * n + 1) L = T(L) # x : vector of variables, of the form : [ρ(t=t1); ρ(t=t2); ... ρ(t=tf), θ(t=t1), ..., then ρ_dot, ..., then ρ_acc, .. ϕ_acc, tf] diff --git a/src/ADNLPProblems/spmsrtls.jl b/src/ADNLPProblems/spmsrtls.jl index fd6cd911e..cfcc190b4 100644 --- a/src/ADNLPProblems/spmsrtls.jl +++ b/src/ADNLPProblems/spmsrtls.jl @@ -6,9 +6,10 @@ function spmsrtls(; use_nls::Bool = false, kwargs...) end function spmsrtls(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 - @adjust_nvar_warn("spmsrtls", n, n) + @adjust_nvar_warn("spmsrtls", n_org, n) p = [sin(i^2) for i = 1:n] x0 = T[p[i] / 5 for i = 1:n] @@ -60,9 +61,10 @@ function spmsrtls(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, k end function spmsrtls(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} + n_org = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 - @adjust_nvar_warn("spmsrtls", n, n) + @adjust_nvar_warn("spmsrtls", n_org, n) p = [sin(i^2) for i = 1:n] x0 = T[p[i] / 5 for i = 1:n] diff --git a/src/ADNLPProblems/srosenbr.jl b/src/ADNLPProblems/srosenbr.jl index b45dd9958..a3d42b4b9 100644 --- a/src/ADNLPProblems/srosenbr.jl +++ b/src/ADNLPProblems/srosenbr.jl @@ -1,8 +1,9 @@ export srosenbr function srosenbr(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n n = 2 * max(1, div(n, 2)) - @adjust_nvar_warn("srosenbr", n, n) + @adjust_nvar_warn("srosenbr", n_org, n) function f(x; n = length(x)) return sum(100 * (x[2 * i] - x[2 * i - 1]^2)^2 + (x[2 * i - 1] - 1)^2 for i = 1:div(n, 2)) end diff --git a/src/ADNLPProblems/structural.jl b/src/ADNLPProblems/structural.jl index 811e74495..1f8742b1c 100644 --- a/src/ADNLPProblems/structural.jl +++ b/src/ADNLPProblems/structural.jl @@ -1,6 +1,7 @@ export structural function structural(args...; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_orig = n n = max(n, 100) sub2ind(shape, a, b) = LinearIndices(shape)[CartesianIndex.(a, b)] @@ -23,7 +24,7 @@ function structural(args...; n::Int = default_nvar, type::Type{T} = Float64, kwa M = Int(N * (N - 1) / 2) # number of edges - @adjust_nvar_warn("structural", n, 2 * M) + @adjust_nvar_warn("structural", n_orig, 2 * M) # EDGES: columns are the indices of the nodes at either end edges = Array{Int}(zeros(M, 2)) diff --git a/src/ADNLPProblems/watson.jl b/src/ADNLPProblems/watson.jl index 7f8586d03..ef4900795 100644 --- a/src/ADNLPProblems/watson.jl +++ b/src/ADNLPProblems/watson.jl @@ -6,8 +6,9 @@ function watson(; use_nls::Bool = false, kwargs...) end function watson(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) 
where {T} + n_org = n n = min(max(n, 2), 31) - @adjust_nvar_warn("watson", n, n) + @adjust_nvar_warn("watson", n_org, n) function f(x; n = n) Ti = eltype(x) return 1 // 2 * sum( @@ -32,8 +33,9 @@ function watson(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa end function watson(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n n = min(max(n, 2), 31) - @adjust_nvar_warn("watson", n, 31) + @adjust_nvar_warn("watson", n_org, n) function F!(r, x; n = n) Ti = eltype(x) for i = 1:29 diff --git a/src/ADNLPProblems/woods.jl b/src/ADNLPProblems/woods.jl index f93ef7d01..7c7934d39 100644 --- a/src/ADNLPProblems/woods.jl +++ b/src/ADNLPProblems/woods.jl @@ -1,8 +1,9 @@ export woods function woods(; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} + n_org = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("woods", n, n) + @adjust_nvar_warn("woods", n_org, n) function f(x; n = length(x)) return sum( 100 * (x[4 * i - 2] - x[4 * i - 3]^2)^2 + diff --git a/src/PureJuMP/NZF1.jl b/src/PureJuMP/NZF1.jl index 3bc5ead82..9451f482f 100644 --- a/src/PureJuMP/NZF1.jl +++ b/src/PureJuMP/NZF1.jl @@ -7,9 +7,10 @@ export NZF1 function NZF1(args...; n::Int = default_nvar, kwargs...) + n_org = n nbis = max(2, div(n, 13)) n = 13 * nbis - @adjust_nvar_warn("NZF1", n, n) + @adjust_nvar_warn("NZF1", n_org, n) l = div(n, 13) diff --git a/src/PureJuMP/broydn7d.jl b/src/PureJuMP/broydn7d.jl index 24bc78a73..87b2da64c 100644 --- a/src/PureJuMP/broydn7d.jl +++ b/src/PureJuMP/broydn7d.jl @@ -46,9 +46,10 @@ export broydn7d "Broyden 7-diagonal model in size `n`" function broydn7d(args...; n::Int = default_nvar, p::Float64 = 7 / 3, kwargs...) 
+ n_org = n n2 = max(1, div(n, 2)) n = 2 * n2 - @adjust_nvar_warn("broydn7d", n, n) + @adjust_nvar_warn("broydn7d", n_org, n) nlp = Model() diff --git a/src/PureJuMP/catenary.jl b/src/PureJuMP/catenary.jl index f7a9c035c..7acffa67b 100644 --- a/src/PureJuMP/catenary.jl +++ b/src/PureJuMP/catenary.jl @@ -18,8 +18,9 @@ export catenary function catenary(args...; n::Int = default_nvar, Bl = 1.0, FRACT = 0.6, kwargs...) + n_org = n n = 3 * max(1, div(n, 3)) n = max(n, 6) - @adjust_nvar_warn("catenary", n, n) + @adjust_nvar_warn("catenary", n_org, n) ## Model Parameters diff --git a/src/PureJuMP/chainwoo.jl b/src/PureJuMP/chainwoo.jl index ff90d3701..43b948c5a 100644 --- a/src/PureJuMP/chainwoo.jl +++ b/src/PureJuMP/chainwoo.jl @@ -35,8 +35,9 @@ export chainwoo "The chained Woods function in size `n`, a variant on the Woods function" function chainwoo(args...; n::Int = default_nvar, kwargs...) + n_org = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("chainwoo", n, n) + @adjust_nvar_warn("chainwoo", n_org, n) nlp = Model() diff --git a/src/PureJuMP/clplatea.jl b/src/PureJuMP/clplatea.jl index 6bd6b9411..4076cc932 100644 --- a/src/PureJuMP/clplatea.jl +++ b/src/PureJuMP/clplatea.jl @@ -26,9 +26,10 @@ export clplatea "The clamped plate problem (Strang, Nocedal, Dax)." function clplatea(args...; n::Int = default_nvar, wght::Float64 = -0.1, kwargs...) + n_org = n p = floor(Int, sqrt(n)) n = p * p - @adjust_nvar_warn("clplatea", n, n) + @adjust_nvar_warn("clplatea", n_org, n) nlp = Model() diff --git a/src/PureJuMP/clplateb.jl b/src/PureJuMP/clplateb.jl index 885048f9c..c8314729c 100644 --- a/src/PureJuMP/clplateb.jl +++ b/src/PureJuMP/clplateb.jl @@ -27,9 +27,10 @@ export clplateb "The clamped plate problem (Strang, Nocedal, Dax)." function clplateb(args...; n::Int = default_nvar, wght::Float64 = -0.1, kwargs...)
+ n_org = n p = floor(Int, sqrt(n)) n = p * p - @adjust_nvar_warn("clplateb", n, n) + @adjust_nvar_warn("clplateb", n_org, n) nlp = Model() diff --git a/src/PureJuMP/clplatec.jl b/src/PureJuMP/clplatec.jl index 38c963e20..1b6a70b84 100644 --- a/src/PureJuMP/clplatec.jl +++ b/src/PureJuMP/clplatec.jl @@ -33,9 +33,10 @@ function clplatec( l::Float64 = 0.01, kwargs..., ) + n_org = n p = floor(Int, sqrt(n)) n = p * p - @adjust_nvar_warn("clplatec", n, n) + @adjust_nvar_warn("clplatec", n_org, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_efgh.jl b/src/PureJuMP/dixmaan_efgh.jl index 6f7ce4795..5cea0bf79 100644 --- a/src/PureJuMP/dixmaan_efgh.jl +++ b/src/PureJuMP/dixmaan_efgh.jl @@ -33,9 +33,10 @@ function dixmaane( δ::Float64 = 0.125, kwargs..., ) + n_org = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaane", n, n) + @adjust_nvar_warn("dixmaane", n_org, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_ijkl.jl b/src/PureJuMP/dixmaan_ijkl.jl index 0152a3a49..0dcac5508 100644 --- a/src/PureJuMP/dixmaan_ijkl.jl +++ b/src/PureJuMP/dixmaan_ijkl.jl @@ -33,9 +33,10 @@ function dixmaani( δ::Float64 = 0.125, kwargs..., ) + n_org = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaani", n, n) + @adjust_nvar_warn("dixmaani", n_org, n) nlp = Model() diff --git a/src/PureJuMP/dixmaan_mnop.jl b/src/PureJuMP/dixmaan_mnop.jl index 5c4e79557..d25574bbf 100644 --- a/src/PureJuMP/dixmaan_mnop.jl +++ b/src/PureJuMP/dixmaan_mnop.jl @@ -31,9 +31,10 @@ function dixmaanm( δ::Float64 = 0.125, kwargs..., ) + n_org = n m = max(1, div(n, 3)) n = 3 * m - @adjust_nvar_warn("dixmaanm", n, n) + @adjust_nvar_warn("dixmaanm", n_org, n) nlp = Model() diff --git a/src/PureJuMP/elec.jl b/src/PureJuMP/elec.jl index fec9c01ed..032d27a05 100644 --- a/src/PureJuMP/elec.jl +++ b/src/PureJuMP/elec.jl @@ -11,8 +11,9 @@ export elec function elec(args...; n::Int = default_nvar, kwargs...) 
+ n_orig = n n = max(2, div(n, 3)) - @adjust_nvar_warn("elec", n, 3 * n) + @adjust_nvar_warn("elec", n_orig, 3 * n) nlp = Model() diff --git a/src/PureJuMP/fminsrf2.jl b/src/PureJuMP/fminsrf2.jl index e2b485a87..b3ccc82ac 100644 --- a/src/PureJuMP/fminsrf2.jl +++ b/src/PureJuMP/fminsrf2.jl @@ -22,10 +22,11 @@ export fminsrf2 function fminsrf2(args...; n::Int = default_nvar, kwargs...) + n_org = n n = max(4, n) p = floor(Int, sqrt(n)) n = p * p - @adjust_nvar_warn("fminsrf2", n, n) + @adjust_nvar_warn("fminsrf2", n_org, n) h00 = 1.0 slopej = 4.0 diff --git a/src/PureJuMP/marine.jl b/src/PureJuMP/marine.jl index d90360d7d..c28a86b1a 100644 --- a/src/PureJuMP/marine.jl +++ b/src/PureJuMP/marine.jl @@ -20,13 +20,14 @@ export marine function marine(args...; n::Int = default_nvar, nc::Int = 1, kwargs...) nlp = Model() + n_orig = n nc = max(min(nc, 4), 1) # number of collocation points ne = 8 # number of differential equations nm = 21 # number of measurements n = max(n, 3 * ne * nc + ne + 2 * ne) nh = Int(round((n - 2 * ne + 1) / (3 * ne * nc + ne))) # number of partition intervals - @adjust_nvar_warn("marine", n, 8 + 7 + nh * (8 + 3 * 8 * nc)) + @adjust_nvar_warn("marine", n_orig, 8 + 7 + nh * (8 + 3 * 8 * nc)) # roots of k-th degree Legendre polynomial rho = if nc == 1 diff --git a/src/PureJuMP/minsurf.jl b/src/PureJuMP/minsurf.jl index bf0580044..852beaeff 100644 --- a/src/PureJuMP/minsurf.jl +++ b/src/PureJuMP/minsurf.jl @@ -16,8 +16,7 @@ function minsurf(args...; n = default_nvar, kwargs...)
if !((:nx in keys(kwargs)) & (:ny in keys(kwargs))) nx, ny = Int(round(sqrt(max(1, n - 2)))), Int(round(sqrt(max(1, n - 2)))) end - n = (nx + 2) * (ny + 2) - @adjust_nvar_warn("minsurf", n, n) + @adjust_nvar_warn("minsurf", n, (nx + 2) * (ny + 2)) x_mesh = LinRange(0, 1, nx + 2) # coordinates of the mesh points x v0 = zeros(nx + 2, ny + 2) # Surface matrix initialization diff --git a/src/PureJuMP/powellsg.jl b/src/PureJuMP/powellsg.jl index 571a2b6c1..31b9dcf60 100644 --- a/src/PureJuMP/powellsg.jl +++ b/src/PureJuMP/powellsg.jl @@ -37,8 +37,9 @@ export powellsg "The extended Powell singular problem in size 'n' " function powellsg(args...; n::Int = default_nvar, kwargs...) + n_org = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("powellsg", n, n) + @adjust_nvar_warn("powellsg", n_org, n) x0 = zeros(n) x0[4 * (collect(1:div(n, 4))) .- 3] .= 3.0 diff --git a/src/PureJuMP/robotarm.jl b/src/PureJuMP/robotarm.jl index 545f8c4f2..ff1a76a8a 100644 --- a/src/PureJuMP/robotarm.jl +++ b/src/PureJuMP/robotarm.jl @@ -14,9 +14,10 @@ export robotarm function robotarm(; n::Int = default_nvar, L = 4.5, kwargs...) + n_orig = n N = max(2, div(n, 9)) n = N + 1 - @adjust_nvar_warn("robotarm", n, 9 * n + 1) + @adjust_nvar_warn("robotarm", n_orig, 9 * n + 1) nlp = Model() diff --git a/src/PureJuMP/spmsrtls.jl b/src/PureJuMP/spmsrtls.jl index a1fcb3c67..57a96a868 100644 --- a/src/PureJuMP/spmsrtls.jl +++ b/src/PureJuMP/spmsrtls.jl @@ -21,9 +21,10 @@ export spmsrtls function spmsrtls(args...; n::Int = default_nvar, kwargs...) 
+ n_org = n m = max(Int(round((n + 2) / 3)), 34) n = m * 3 - 2 - @adjust_nvar_warn("spmsrtls", n, n) + @adjust_nvar_warn("spmsrtls", n_org, n) p = [sin(i^2) for i = 1:n] x0 = [p[i] / 5 for i = 1:n] diff --git a/src/PureJuMP/srosenbr.jl b/src/PureJuMP/srosenbr.jl index db1556147..7333bf636 100644 --- a/src/PureJuMP/srosenbr.jl +++ b/src/PureJuMP/srosenbr.jl @@ -21,8 +21,9 @@ export srosenbr "The separable extension of Rosenbrock's function 'n' " function srosenbr(args...; n::Int = default_nvar, kwargs...) + n_org = n n = 2 * max(1, div(n, 2)) - @adjust_nvar_warn("srosenbr", n, n) + @adjust_nvar_warn("srosenbr", n_org, n) x0 = ones(n) x0[2 * (collect(1:div(n, 2))) .- 1] .= -1.2 diff --git a/src/PureJuMP/steering.jl b/src/PureJuMP/steering.jl index a9d9e5c1d..fae18b753 100644 --- a/src/PureJuMP/steering.jl +++ b/src/PureJuMP/steering.jl @@ -8,7 +8,7 @@ export steering function steering(; n::Int = default_nvar, kwargs...) a = 100.0 # Magnitude of force. - @adjust_nvar_warn("steering", n, 5 * n_orig + 6) + @adjust_nvar_warn("steering", n, 5 * n + 6) # Bounds on the control u_min, u_max = -pi/2.0, pi/2.0 xs = zeros(4) diff --git a/src/PureJuMP/structural.jl b/src/PureJuMP/structural.jl index 667fefd65..474f4ff3b 100644 --- a/src/PureJuMP/structural.jl +++ b/src/PureJuMP/structural.jl @@ -6,6 +6,7 @@ export structural function structural(args...; n::Int = default_nvar, kwargs...) + n_orig = n n = max(n, 100) sub2ind(shape, a, b) = LinearIndices(shape)[CartesianIndex.(a, b)] @@ -29,7 +30,7 @@ function structural(args...; n::Int = default_nvar, kwargs...) 
M = Int(N * (N - 1) / 2) # number of edges - @adjust_nvar_warn("structural", n, 2 * M) + @adjust_nvar_warn("structural", n_orig, 2 * M) # EDGES: columns are the indices of the nodes at either end edges = Array{Int}(zeros(M, 2)) diff --git a/src/PureJuMP/watson.jl b/src/PureJuMP/watson.jl index 7ab786ba5..4dcca0612 100644 --- a/src/PureJuMP/watson.jl +++ b/src/PureJuMP/watson.jl @@ -17,8 +17,9 @@ export watson function watson(args...; n::Int = default_nvar, kwargs...) + n_org = n n = min(max(n, 2), 31) - @adjust_nvar_warn("watson", n, n) + @adjust_nvar_warn("watson", n_org, n) m = 31 nlp = Model() diff --git a/src/PureJuMP/woods.jl b/src/PureJuMP/woods.jl index 1cec7a722..253de4349 100644 --- a/src/PureJuMP/woods.jl +++ b/src/PureJuMP/woods.jl @@ -39,8 +39,9 @@ export woods "The extended Woods problem `n` " function woods(args...; n::Int = default_nvar, kwargs...) + n_org = n n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("woods", n, n) + @adjust_nvar_warn("woods", n_org, n) nlp = Model() From 3529f84316533ef7f57573ce0c23fce48bdc3ebd Mon Sep 17 00:00:00 2001 From: arnavk23 Date: Sun, 10 May 2026 17:11:13 +0530 Subject: [PATCH 30/31] Update catenary.jl --- src/PureJuMP/catenary.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/PureJuMP/catenary.jl b/src/PureJuMP/catenary.jl index 7acffa67b..d25daaa0c 100644 --- a/src/PureJuMP/catenary.jl +++ b/src/PureJuMP/catenary.jl @@ -17,8 +17,8 @@ export catenary function catenary(args...; n::Int = default_nvar, Bl = 1.0, FRACT = 0.6, kwargs...) 
- n = 3 * max(1, div(n, 3)) n_org = n + n = 3 * max(1, div(n, 3)) n = max(n, 6) @adjust_nvar_warn("catenary", n_org, n) From af173ac578935cbaa75b96a42cf613119481c4c0 Mon Sep 17 00:00:00 2001 From: Arnav Kapoor Date: Sun, 10 May 2026 21:46:34 +0530 Subject: [PATCH 31/31] Update powellsg.jl --- src/ADNLPProblems/powellsg.jl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/ADNLPProblems/powellsg.jl b/src/ADNLPProblems/powellsg.jl index 50400b97f..a1bb3c1b4 100644 --- a/src/ADNLPProblems/powellsg.jl +++ b/src/ADNLPProblems/powellsg.jl @@ -6,7 +6,8 @@ function powellsg(; use_nls::Bool = false, kwargs...) end function powellsg(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T} - n = 4 * max(1, div(n, 4)) - @adjust_nvar_warn("powellsg", n, n) + n_org = n + n = 4 * max(1, div(n, 4)) # number of variables adjusted to be a multiple of 4 + @adjust_nvar_warn("powellsg", n_org, n) function f(x; n = length(x)) return sum(