Add maxiters to some tests to avoid timeout #249

Open · wants to merge 10 commits into base: master
2 changes: 1 addition & 1 deletion .github/workflows/CI.yml
Original file line number Diff line number Diff line change
@@ -16,10 +16,10 @@ jobs:
    strategy:
      matrix:
        group:
+          - ODEs
          - All
        version:
          - '1'
-          - '1.6'
    steps:
      - uses: actions/checkout@v4
      - uses: julia-actions/setup-julia@v1
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -25,7 +25,7 @@ PreallocationTools = "0.2, 0.3, 0.4"
RecursiveArrayTools = "1.0, 2.0, 3"
SciMLBase = "1.69, 2"
SciMLSensitivity = "7"
julia = "1.6"
julia = "1.10"

[extras]
BlackBoxOptim = "a134a8b2-14d6-55f6-9291-3336d3ab0209"
18 changes: 9 additions & 9 deletions test/likelihood.jl
@@ -19,12 +19,12 @@ end
aggregate_data = convert(Array, VectorOfArray([generate_data(sol, t) for i in 1:100]))

distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
-obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions), maxiters = 10000,
+obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions), maxiters = 1000,
verbose = false)

optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
@test result.original.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1

data_distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
@@ -33,11 +33,11 @@ diff_distributions = [fit_mle(Normal,
for j in 2:200, i in 1:2]
obj = build_loss_objective(prob1, Tsit5(),
LogLikeLoss(t, data_distributions, diff_distributions),
-Optimization.AutoForwardDiff(), maxiters = 10000,
+Optimization.AutoForwardDiff(), maxiters = 1000,
verbose = false)
optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
@test result.original.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1

data_distributions = [fit_mle(Normal, aggregate_data[i, j, :]) for i in 1:2, j in 1:200]
@@ -46,14 +46,14 @@ diff_distributions = [fit_mle(Normal,
for j in 2:200, i in 1:2]
obj = build_loss_objective(prob1, Tsit5(),
LogLikeLoss(t, data_distributions, diff_distributions, 0.3),
-Optimization.AutoForwardDiff(), maxiters = 10000,
+Optimization.AutoForwardDiff(), maxiters = 1000,
verbose = false)
optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
@test result.u≈[1.5, 1.0] atol=1e-1
using OptimizationBBO.BlackBoxOptim
-result = bboptimize(obj, SearchRange = [(0.5, 5.0), (0.5, 5.0)], MaxSteps = 11e3)
+result = bboptimize(obj, SearchRange = [(0.5, 5.0), (0.5, 5.0)], MaxSteps = 1e3)
@test result.archive_output.best_candidate≈[1.5, 1.0] atol=1e-1

distributions = [fit_mle(MvNormal, aggregate_data[:, j, :]) for j in 1:200]
@@ -63,9 +63,9 @@ diff_distributions = [fit_mle(MvNormal,
priors = [Truncated(Normal(1.5, 0.1), 0, 2), Truncated(Normal(1.0, 0.1), 0, 1.5)]
obj = build_loss_objective(prob1, Tsit5(),
LogLikeLoss(t, distributions, diff_distributions),
-Optimization.AutoForwardDiff(), maxiters = 10000,
+Optimization.AutoForwardDiff(), maxiters = 1000,
verbose = false, priors = priors)
optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0], lb = [0.5, 0.5],
ub = [5.0, 5.0])
-result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 11e3)
+result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 1e3)
@test result.u≈[1.5, 1.0] atol=1e-1
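Two different budgets are being reduced in this file, and they are easy to conflate: the `maxiters` passed to `build_loss_objective` is forwarded to the ODE solver and caps its steps on each objective evaluation (so runaway integrations at bad parameter guesses bail out early), while the `maxiters` passed to `solve` caps the optimizer's iterations. A minimal sketch of the pattern, reusing the names defined earlier in this file:

```julia
# Solver-side budget: each evaluation of the objective integrates prob1,
# and a bad parameter guess can make that integration very slow.
obj = build_loss_objective(prob1, Tsit5(), LogLikeLoss(t, distributions);
                           maxiters = 1000, verbose = false)

# Optimizer-side budget: how many candidate parameter vectors BBO may try.
optprob = Optimization.OptimizationProblem(obj, [2.0, 2.0];
                                           lb = [0.5, 0.5], ub = [5.0, 5.0])
result = solve(optprob, BBO_adaptive_de_rand_1_bin_radiuslimited(); maxiters = 1e3)
```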
2 changes: 1 addition & 1 deletion test/multiple_shooting_objective_test.jl
@@ -18,7 +18,7 @@ ms_obj = multiple_shooting_objective(ms_prob, Tsit5(), L2Loss(t, data),
Optimization.AutoZygote();
discontinuity_weight = 1.0, abstol = 1e-12,
reltol = 1e-12)
-result = bboptimize(ms_obj; SearchRange = bound, MaxSteps = 21e3)
+result = bboptimize(ms_obj; SearchRange = bound, MaxSteps = 1e3)
@test result.archive_output.best_candidate[(end - 1):end]≈[1.5, 1.0] atol=2e-1

priors = [Truncated(Normal(1.5, 0.5), 0, 2), Truncated(Normal(1.0, 0.5), 0, 1.5)]
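The `[(end - 1):end]` indexing in the test above reflects how multiple shooting structures its search space: the decision vector stacks the initial state of every shooting segment ahead of the model parameters, so only the trailing entries are the parameters being estimated. A hedged sketch of pulling them out, under that reading of the layout:

```julia
# The candidate vector is [segment initial states...; model parameters...];
# with two model parameters, the estimates sit in the last two slots.
best = result.archive_output.best_candidate
p_est = best[(end - 1):end]    # compared against [1.5, 1.0] in the test above
```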
70 changes: 37 additions & 33 deletions test/runtests.jl
@@ -1,36 +1,40 @@
-using DiffEqParamEstim, Test
+using DiffEqParamEstim, Test, Optimization, BlackBoxOptim, Optim

-@time @testset "Tests on ODEs" begin
-    include("tests_on_odes/test_problems.jl")
-    include("tests_on_odes/l2loss_test.jl")
-    include("tests_on_odes/optim_test.jl")
-    include("tests_on_odes/nlopt_test.jl")
-    include("tests_on_odes/two_stage_method_test.jl")
-    include("tests_on_odes/regularization_test.jl")
-    include("tests_on_odes/blackboxoptim_test.jl")
-    include("tests_on_odes/weighted_loss_test.jl")
-    include("tests_on_odes/l2_colloc_grad_test.jl")
-    #include("tests_on_odes/genetic_algorithm_test.jl") # Not updated to v0.6
-end
+const GROUP = get(ENV, "GROUP", "All")

-@time @testset "Multiple Shooting Objective" begin
-    include("multiple_shooting_objective_test.jl")
-end
-@time @testset "Likelihood Loss" begin
-    include("likelihood.jl")
-end
-@time @testset "Out-of-place ODE Tests" begin
-    include("out_of_place_odes.jl")
-end
-@time @testset "Steady State Tests" begin
-    include("steady_state_tests.jl")
-end
-@time @testset "DAE Tests" begin
-    include("dae_tests.jl")
-end
-@time @testset "DDE Tests" begin
-    include("dde_tests.jl")
-end
-@time @testset "Test on Monte" begin
-    include("test_on_monte.jl")
+if GROUP == "ODEs"
+    @time @testset "Tests on ODEs" begin
+        include("tests_on_odes/test_problems.jl")
+        include("tests_on_odes/l2loss_test.jl")
+        include("tests_on_odes/optim_test.jl")
+        include("tests_on_odes/nlopt_test.jl")
+        include("tests_on_odes/two_stage_method_test.jl")
+        include("tests_on_odes/regularization_test.jl")
+        include("tests_on_odes/blackboxoptim_test.jl")
+        include("tests_on_odes/weighted_loss_test.jl")
+        include("tests_on_odes/l2_colloc_grad_test.jl")
+        #include("tests_on_odes/genetic_algorithm_test.jl") # Not updated to v0.6
+    end
+else
+    @time @testset "Multiple Shooting Objective" begin
+        include("multiple_shooting_objective_test.jl")
+    end
+    @time @testset "Likelihood Loss" begin
+        include("likelihood.jl")
+    end
+    @time @testset "Out-of-place ODE Tests" begin
+        include("out_of_place_odes.jl")
+    end
+    @time @testset "Steady State Tests" begin
+        include("steady_state_tests.jl")
+    end
+    @time @testset "DAE Tests" begin
+        include("dae_tests.jl")
+    end
+    @time @testset "DDE Tests" begin
+        include("dde_tests.jl")
+    end
+    @time @testset "Test on Monte" begin
+        include("test_on_monte.jl")
+    end
end
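With this split, CI can run the slow `tests_on_odes` suite and everything else in parallel matrix entries. A sketch of driving one group locally (the `withenv` wrapper is illustrative; CI would instead export `GROUP` in the job environment):

```julia
# Run only the ODE-focused tests, much as the GROUP == "ODEs" CI entry does;
# any other value (including the "All" default) runs the remaining test sets.
withenv("GROUP" => "ODEs") do
    include("test/runtests.jl")
end
```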
10 changes: 5 additions & 5 deletions test/tests_on_odes/blackboxoptim_test.jl
@@ -2,19 +2,19 @@ using BlackBoxOptim

println("Use BlackBoxOptim to fit the parameter")
cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
bound1 = Tuple{Float64, Float64}[(1, 2)]
-result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 1e3)
@test result.archive_output.best_candidate[1]≈1.5 atol=3e-1

cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
bound2 = Tuple{Float64, Float64}[(1, 2), (2, 4)]
-result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 1e3)
@test result.archive_output.best_candidate≈[1.5; 3.0] atol=3e-1

cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)]
result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3)
@test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1
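`MaxSteps` is BlackBoxOptim's total budget of candidate evaluations, so the 11e3 → 1e3 cuts trade optimization accuracy for runtime; the loose tolerances (`atol=3e-1`, `atol=5e-1`) are what absorb the coarser optimum. A sketch of the call with the accessor form of reading the result (the explicit `Method` is an assumption, naming the algorithm used via Optimization.jl elsewhere in these tests):

```julia
using BlackBoxOptim

result = bboptimize(cost_function;
                    SearchRange = [(1.0, 2.0)],   # one bounded parameter
                    Method = :adaptive_de_rand_1_bin_radiuslimited,
                    MaxSteps = 1_000)             # total candidate evaluations
best_candidate(result)    # same value as result.archive_output.best_candidate
```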
6 changes: 3 additions & 3 deletions test/tests_on_odes/genetic_algorithm_test.jl
@@ -14,7 +14,7 @@ println("Use Genetic Algorithm to fit the parameter")
# Floating number specifies fraction of population.

cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
N = 1
result, fitness, cnt = ga(cost_function, N;
initPopulation = Float64[1.2],
@@ -26,7 +26,7 @@ result, fitness, cnt = ga(cost_function, N;
@test result[1]≈1.5 atol=3e-1

cost_function = build_loss_objective(prob2, Tsit5(), L2Loss(t, data),
-maxiters = 10000)
+maxiters = 1000)
N = 2
result, fitness, cnt = ga(cost_function, N;
initPopulation = Float64[1.2, 2.8],
@@ -38,7 +38,7 @@ result, fitness, cnt = ga(cost_function, N;
@test result≈[1.5; 3.0] atol=3e-1

cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data),
-maxiters = 10000)
+maxiters = 1000)
N = 4
result, fitness, cnt = ga(cost_function, N;
initPopulation = Float64[1.3, 0.8, 2.8, 1.2],
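The reason a genetic algorithm (or any third-party optimizer) can consume these objectives directly is that the tests treat the return of `build_loss_objective` as a plain callable mapping a parameter vector to a scalar loss. A minimal sanity check under that assumption (note this file is currently excluded from `runtests.jl`):

```julia
# If the objective is an ordinary function of the parameter vector, the loss
# at the true parameter should not exceed the loss at a perturbed guess.
@test cost_function([1.5]) <= cost_function([1.2])
```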
8 changes: 4 additions & 4 deletions test/tests_on_odes/l2_colloc_grad_test.jl
@@ -2,30 +2,30 @@ weight = 1.0e-6

cost_function = build_loss_objective(prob1, Tsit5(),
L2Loss(t, data, colloc_grad = colloc_grad(t, data)),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
result = Optim.optimize(cost_function, 1.0, 2.0)
@test result.minimizer≈1.5 atol=3e-1

cost_function = build_loss_objective(prob2, Tsit5(),
L2Loss(t, data,
differ_weight = weight, data_weight = weight,
colloc_grad = colloc_grad(t, data)),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
result = Optim.optimize(cost_function, [1.3, 2.8], Optim.BFGS())
@test result.minimizer≈[1.5; 3.0] atol=3e-1

cost_function = build_loss_objective(prob3, Tsit5(),
L2Loss(t, data,
differ_weight = weight,
colloc_grad = colloc_grad(t, data)),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
result = Optim.optimize(cost_function, [1.4, 0.9, 2.9, 1.2], Optim.BFGS())
@test result.minimizer≈[1.5, 1.0, 3.0, 1.0] atol=3e-1

cost_function = build_loss_objective(prob1, Tsit5(),
L2Loss(t, data,
data_weight = weight,
colloc_grad = colloc_grad(t, data)),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
result = Optim.optimize(cost_function, 1.0, 2)
@test result.minimizer≈1.5 atol=3e-1
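`colloc_grad(t, data)` estimates the time derivatives of the data by collocation, and `L2Loss` then adds a term penalizing the mismatch between those estimates and the model's right-hand side along the fitted trajectory, which tends to smooth the loss surface for the Optim searches used here. A sketch under that reading of the API:

```julia
# Precompute data-derivative estimates once, then share them across losses.
cg = colloc_grad(t, data)
cost_function = build_loss_objective(prob1, Tsit5(),
                                     L2Loss(t, data, colloc_grad = cg);
                                     maxiters = 1000, verbose = false)
result = Optim.optimize(cost_function, 1.0, 2.0)   # univariate bounded search
```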
8 changes: 4 additions & 4 deletions test/tests_on_odes/l2loss_test.jl
@@ -3,27 +3,27 @@ using BlackBoxOptim, Optim
cost_function = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
maxiters = 10000, verbose = false)
bound1 = Tuple{Float64, Float64}[(1, 2)]
-result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound1, MaxSteps = 1e3)
@test result.archive_output.best_candidate[1]≈1.5 atol=3e-1

cost_function = build_loss_objective(prob2, Tsit5(),
L2Loss(t, data, differ_weight = nothing,
data_weight = 1.0),
maxiters = 10000, verbose = false)
bound2 = Tuple{Float64, Float64}[(1, 2), (1, 4)]
-result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound2, MaxSteps = 1e3)
@test result.archive_output.best_candidate≈[1.5; 3.0] atol=3e-1

cost_function = build_loss_objective(prob3, Tsit5(), L2Loss(t, data, differ_weight = 10),
maxiters = 10000, verbose = false)
bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (2, 4), (0, 2)]
-result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 1e3)
@test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1

cost_function = build_loss_objective(prob3, Tsit5(),
L2Loss(t, data, differ_weight = 0.3,
data_weight = 0.7),
maxiters = 10000, verbose = false)
bound3 = Tuple{Float64, Float64}[(1, 2), (0, 2), (1, 4), (0, 2)]
-result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 11e3)
+result = bboptimize(cost_function; SearchRange = bound3, MaxSteps = 1e3)
@test result.archive_output.best_candidate≈[1.5; 1.0; 3.0; 1.0] atol=5e-1
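`L2Loss` mixes two residual terms: `data_weight` scales the pointwise misfit `u(tᵢ) − dataᵢ`, and `differ_weight` scales the misfit of successive differences `(u(tᵢ) − u(tᵢ₋₁)) − (dataᵢ − dataᵢ₋₁)`, which emphasizes matching the data's local trend. On this reading of the loss, the final hunk's 0.7/0.3 split is:

```julia
# 70% of the loss from the pointwise fit, 30% from fitting first differences;
# differ_weight = nothing (second hunk) disables the difference term entirely.
loss = L2Loss(t, data, differ_weight = 0.3, data_weight = 0.7)
cost_function = build_loss_objective(prob3, Tsit5(), loss;
                                     maxiters = 1000, verbose = false)
```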
10 changes: 5 additions & 5 deletions test/tests_on_odes/nlopt_test.jl
@@ -3,7 +3,7 @@ using OptimizationNLopt, Zygote
println("Use NLOpt to fit the parameter")

obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoZygote(),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)

opt = Opt(:LN_COBYLA, 1)
optprob = OptimizationNLopt.OptimizationProblem(obj, [1.4])
@@ -14,25 +14,25 @@ opt = Opt(:GN_ESCH, 1)
lower_bounds!(opt, [1.0])
upper_bounds!(opt, [3.0])
xtol_rel!(opt, 1e-3)
-maxeval!(opt, 10000)
+maxeval!(opt, 1000)
res = solve(optprob, opt)
@test res.u[1]≈1.5 atol=1e-1

opt = Opt(:GN_ISRES, 1)
lower_bounds!(opt, [1.0])
upper_bounds!(opt, [3.0])
xtol_rel!(opt, 1e-4)
-maxeval!(opt, 100 - 000)
+maxeval!(opt, 1000)
res = solve(optprob, opt)
@test res.u[1]≈1.5 atol=1e-1

# test differentiation

obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data), Optimization.AutoForwardDiff();
-maxiters = 10000) #zygote behaves weirdly here
+maxiters = 1000) #zygote behaves weirdly here
opt = Opt(:LD_MMA, 1)
xtol_rel!(opt, 1e-3)
-maxeval!(opt, 10000)
+maxeval!(opt, 1000)
optprob = OptimizationNLopt.OptimizationProblem(obj, [1.3])
res = solve(optprob, opt)
@test res.u[1]≈1.5 atol=1e-1
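One hunk here is a genuine bug fix rather than tuning: the old `maxeval!(opt, 100 - 000)` was presumably a mistyped `10000`, but Julia parses it as subtraction, so the `:GN_ISRES` run was silently capped at 100 evaluations:

```julia
julia> 100 - 000    # `000` is just the integer literal 0, so the old budget was 100
100
```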
2 changes: 1 addition & 1 deletion test/tests_on_odes/optim_test.jl
@@ -1,6 +1,6 @@
using Optim, Random
obj = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)

### Optim Method

6 changes: 3 additions & 3 deletions test/tests_on_odes/regularization_test.jl
@@ -2,22 +2,22 @@ using PenaltyFunctions, OptimizationOptimJL, LinearAlgebra, SciMLSensitivity

cost_function_1 = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
Optimization.AutoZygote(),
-Regularization(0.6, L2Penalty()), maxiters = 10000,
+Regularization(0.6, L2Penalty()), maxiters = 1000,
verbose = false, abstol = 1e-8, reltol = 1e-8)
cost_function_2 = build_loss_objective(prob2, Tsit5(), L2Loss(t, data),
Optimization.AutoZygote(),
Regularization(0.1,
MahalanobisPenalty(Matrix(1.0I, 2, 2))),
verbose = false,
abstol = 1e-8, reltol = 1e-8,
-maxiters = 10000)
+maxiters = 1000)
cost_function_3 = build_loss_objective(prob3, Tsit5(), L2Loss(t, data),
Optimization.AutoZygote(),
Regularization(0.1,
MahalanobisPenalty(Matrix(1.0I, 4, 4))),
verbose = false,
abstol = 1e-8, reltol = 1e-8,
-maxiters = 10000)
+maxiters = 1000)

println("Use Optim BFGS to fit the parameter")
optprob = Optimization.OptimizationProblem(cost_function_1, [1.0])
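DiffEqParamEstim's `Regularization(λ, penalty)` wraps a PenaltyFunctions penalty and adds roughly `λ * penalty(p)` to the base loss, biasing the fit toward small (or matrix-weighted, for `MahalanobisPenalty`) parameter vectors. A sketch of the composition used in the first hunk:

```julia
using PenaltyFunctions

# L2 data misfit plus a ridge-style (L2) penalty on the parameters, scaled
# by 0.6; AutoZygote supplies the gradient for the BFGS run below.
cost_function_1 = build_loss_objective(prob1, Tsit5(), L2Loss(t, data),
                                       Optimization.AutoZygote(),
                                       Regularization(0.6, L2Penalty());
                                       maxiters = 1000, verbose = false,
                                       abstol = 1e-8, reltol = 1e-8)
```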
2 changes: 1 addition & 1 deletion test/tests_on_odes/weighted_loss_test.jl
@@ -22,7 +22,7 @@ weighted_data = original_solution_matrix_form + error
weighted_cost_function = build_loss_objective(prob1, Tsit5(),
L2Loss(t, weighted_data,
data_weight = weight),
-maxiters = 10000, verbose = false)
+maxiters = 1000, verbose = false)
opt = Opt(:LN_COBYLA, 1)
min_objective!(opt, weighted_cost_function)
(minf, minx, ret) = NLopt.optimize(opt, [1.3])