diff --git a/Project.toml b/Project.toml index e6914e024..9c18f8c7e 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ModelPredictiveControl" uuid = "61f9bdb8-6ae4-484a-811f-bbf86720c31c" authors = ["Francis Gagnon"] -version = "1.9.1" +version = "1.9.2" [deps] ControlSystemsBase = "aaaaaaaa-a6ca-5380-bf3e-84a91bcd477e" diff --git a/src/controller/construct.jl b/src/controller/construct.jl index fd977b602..46db1d19f 100644 --- a/src/controller/construct.jl +++ b/src/controller/construct.jl @@ -221,15 +221,20 @@ constraints are all soft by default. See Extended Help for time-varying constrai julia> mpc = LinMPC(setop!(LinModel(tf(3, [30, 1]), 4), uop=[50], yop=[25])); julia> mpc = setconstraint!(mpc, umin=[0], umax=[100], Δumin=[-10], Δumax=[+10]) -LinMPC controller with a sample time Ts = 4.0 s, OSQP optimizer, SingleShooting transcription, SteadyKalmanFilter estimator and: - 10 prediction steps Hp - 2 control steps Hc - 1 slack variable ϵ (control constraints) - 1 manipulated inputs u (0 integrating states) - 2 estimated states x̂ - 1 measured outputs ym (1 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +LinMPC controller with a sample time Ts = 4.0 s: +├ estimator: SteadyKalmanFilter +├ model: LinModel +├ optimizer: OSQP +├ transcription: SingleShooting +└ dimensions: + ├ 10 prediction steps Hp + ├ 2 control steps Hc + ├ 1 slack variable ϵ (control constraints) + ├ 1 manipulated inputs u (0 integrating states) + ├ 2 estimated states x̂ + ├ 1 measured outputs ym (1 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` # Extended Help diff --git a/src/controller/explicitmpc.jl b/src/controller/explicitmpc.jl index bf07765d1..c5eaa2722 100644 --- a/src/controller/explicitmpc.jl +++ b/src/controller/explicitmpc.jl @@ -117,14 +117,15 @@ arguments. 
julia> model = LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 4); julia> mpc = ExplicitMPC(model, Mwt=[0, 1], Nwt=[0.5], Hp=30, Hc=1) -ExplicitMPC controller with a sample time Ts = 4.0 s, SteadyKalmanFilter estimator and: - 30 prediction steps Hp - 1 control steps Hc - 1 manipulated inputs u (0 integrating states) - 4 estimated states x̂ - 2 measured outputs ym (2 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +ExplicitMPC controller with a sample time Ts = 4.0 s: +├ estimator: SteadyKalmanFilter +├ model: LinModel +└ dimensions: + ├ 1 manipulated inputs u (0 integrating states) + ├ 4 estimated states x̂ + ├ 2 measured outputs ym (2 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` """ @@ -177,15 +178,15 @@ end setconstraint!(::ExplicitMPC; kwargs...) = error("ExplicitMPC does not support constraints.") function Base.show(io::IO, mpc::ExplicitMPC) - Hp, Hc = mpc.Hp, mpc.Hc - nu, nd = mpc.estim.model.nu, mpc.estim.model.nd - nx̂, nym, nyu = mpc.estim.nx̂, mpc.estim.nym, mpc.estim.nyu + estim, model = mpc.estim, mpc.estim.model + Hp, Hc, nϵ = mpc.Hp, mpc.Hc, mpc.nϵ + nu, nd = model.nu, model.nd + nx̂, nym, nyu = estim.nx̂, estim.nym, estim.nyu n = maximum(ndigits.((Hp, Hc, nu, nx̂, nym, nyu, nd))) + 1 - println(io, "$(nameof(typeof(mpc))) controller with a sample time Ts = "* - "$(mpc.estim.model.Ts) s, "* - "$(nameof(typeof(mpc.estim))) estimator and:") - println(io, "$(lpad(Hp, n)) prediction steps Hp") - println(io, "$(lpad(Hc, n)) control steps Hc") + println(io, "$(nameof(typeof(mpc))) controller with a sample time Ts = $(model.Ts) s:") + println(io, "├ estimator: $(nameof(typeof(mpc.estim)))") + println(io, "├ model: $(nameof(typeof(model)))") + println(io, "└ dimensions:") print_estim_dim(io, mpc.estim, n) end diff --git a/src/controller/linmpc.jl b/src/controller/linmpc.jl index 104080ce1..6eec082a3 100644 --- a/src/controller/linmpc.jl +++ b/src/controller/linmpc.jl @@ -168,15 +168,20 @@ arguments. 
This controller allocates memory at each time step for the optimizati julia> model = LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 4); julia> mpc = LinMPC(model, Mwt=[0, 1], Nwt=[0.5], Hp=30, Hc=1) -LinMPC controller with a sample time Ts = 4.0 s, OSQP optimizer, SingleShooting transcription, SteadyKalmanFilter estimator and: - 30 prediction steps Hp - 1 control steps Hc - 1 slack variable ϵ (control constraints) - 1 manipulated inputs u (0 integrating states) - 4 estimated states x̂ - 2 measured outputs ym (2 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +LinMPC controller with a sample time Ts = 4.0 s: +├ estimator: SteadyKalmanFilter +├ model: LinModel +├ optimizer: OSQP +├ transcription: SingleShooting +└ dimensions: + ├ 30 prediction steps Hp + ├ 1 control steps Hc + ├ 1 slack variable ϵ (control constraints) + ├ 1 manipulated inputs u (0 integrating states) + ├ 4 estimated states x̂ + ├ 2 measured outputs ym (2 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` # Extended Help @@ -238,15 +243,20 @@ Use custom state estimator `estim` to construct `LinMPC`. 
julia> estim = KalmanFilter(LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 4), i_ym=[2]); julia> mpc = LinMPC(estim, Mwt=[0, 1], Nwt=[0.5], Hp=30, Hc=1) -LinMPC controller with a sample time Ts = 4.0 s, OSQP optimizer, SingleShooting transcription, KalmanFilter estimator and: - 30 prediction steps Hp - 1 control steps Hc - 1 slack variable ϵ (control constraints) - 1 manipulated inputs u (0 integrating states) - 3 estimated states x̂ - 1 measured outputs ym (1 integrating states) - 1 unmeasured outputs yu - 0 measured disturbances d +LinMPC controller with a sample time Ts = 4.0 s: +├ estimator: KalmanFilter +├ model: LinModel +├ optimizer: OSQP +├ transcription: SingleShooting +└ dimensions: + ├ 30 prediction steps Hp + ├ 1 control steps Hc + ├ 1 slack variable ϵ (control constraints) + ├ 1 manipulated inputs u (0 integrating states) + ├ 3 estimated states x̂ + ├ 1 measured outputs ym (1 integrating states) + ├ 1 unmeasured outputs yu + └ 0 measured disturbances d ``` """ function LinMPC( diff --git a/src/controller/nonlinmpc.jl b/src/controller/nonlinmpc.jl index 76dad045a..bf8b15976 100644 --- a/src/controller/nonlinmpc.jl +++ b/src/controller/nonlinmpc.jl @@ -223,16 +223,23 @@ This controller allocates memory at each time step for the optimization. 
```jldoctest julia> model = NonLinModel((x,u,_,_)->0.5x+u, (x,_,_)->2x, 10.0, 1, 1, 1, solver=nothing); -julia> mpc = NonLinMPC(model, Hp=20, Hc=1, Cwt=1e6) -NonLinMPC controller with a sample time Ts = 10.0 s, Ipopt optimizer, SingleShooting transcription, UnscentedKalmanFilter estimator and: - 20 prediction steps Hp - 1 control steps Hc - 1 slack variable ϵ (control constraints) - 1 manipulated inputs u (0 integrating states) - 2 estimated states x̂ - 1 measured outputs ym (1 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +julia> mpc = NonLinMPC(model, Hp=20, Hc=10, transcription=MultipleShooting()) +NonLinMPC controller with a sample time Ts = 10.0 s: +├ estimator: UnscentedKalmanFilter +├ model: NonLinModel +├ optimizer: Ipopt +├ transcription: MultipleShooting +├ gradient: AutoForwardDiff +├ jacobian: AutoSparse (AutoForwardDiff, TracerSparsityDetector, GreedyColoringAlgorithm) +└ dimensions: + ├ 20 prediction steps Hp + ├ 10 control steps Hc + ├ 1 slack variable ϵ (control constraints) + ├ 1 manipulated inputs u (0 integrating states) + ├ 2 estimated states x̂ + ├ 1 measured outputs ym (1 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` # Extended Help @@ -327,16 +334,23 @@ julia> model = NonLinModel((x,u,_,_)->0.5x+u, (x,_,_)->2x, 10.0, 1, 1, 1, solver julia> estim = UnscentedKalmanFilter(model, σQint_ym=[0.05]); -julia> mpc = NonLinMPC(estim, Hp=20, Hc=1, Cwt=1e6) -NonLinMPC controller with a sample time Ts = 10.0 s, Ipopt optimizer, SingleShooting transcription, UnscentedKalmanFilter estimator and: - 20 prediction steps Hp - 1 control steps Hc - 1 slack variable ϵ (control constraints) - 1 manipulated inputs u (0 integrating states) - 2 estimated states x̂ - 1 measured outputs ym (1 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +julia> mpc = NonLinMPC(estim, Hp=20, Cwt=1e6) +NonLinMPC controller with a sample time Ts = 10.0 s: +├ estimator: UnscentedKalmanFilter +├ 
model: NonLinModel +├ optimizer: Ipopt +├ transcription: SingleShooting +├ gradient: AutoForwardDiff +├ jacobian: AutoForwardDiff +└ dimensions: + ├ 20 prediction steps Hp + ├ 2 control steps Hc + ├ 1 slack variable ϵ (control constraints) + ├ 1 manipulated inputs u (0 integrating states) + ├ 2 estimated states x̂ + ├ 1 measured outputs ym (1 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` """ function NonLinMPC( @@ -738,8 +752,14 @@ end "Evaluate the economic term `E*JE` of the objective function for [`NonLinMPC`](@ref)." function obj_econ( - mpc::NonLinMPC, model::SimModel, Ue, Ŷe::AbstractVector{NT} + mpc::NonLinMPC, ::SimModel, Ue, Ŷe::AbstractVector{NT} ) where NT<:Real E_JE = mpc.weights.iszero_E ? zero(NT) : mpc.weights.E*mpc.JE(Ue, Ŷe, mpc.D̂e, mpc.p) return E_JE -end \ No newline at end of file +end + +"Print the differentiation backends of a [`NonLinMPC`](@ref) controller." +function print_backends(io::IO, mpc::NonLinMPC) + println(io, "├ gradient: $(backend_str(mpc.gradient))") + println(io, "├ jacobian: $(backend_str(mpc.jacobian))") +end diff --git a/src/estimator/internal_model.jl b/src/estimator/internal_model.jl index c5963f2d2..64111944b 100644 --- a/src/estimator/internal_model.jl +++ b/src/estimator/internal_model.jl @@ -80,12 +80,14 @@ estimator is allocation-free if `model` simulations do not allocate. 
# Examples ```jldoctest julia> estim = InternalModel(LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 0.5), i_ym=[2]) -InternalModel estimator with a sample time Ts = 0.5 s, LinModel and: - 1 manipulated inputs u - 2 estimated states x̂ - 1 measured outputs ym - 1 unmeasured outputs yu - 0 measured disturbances d +InternalModel estimator with a sample time Ts = 0.5 s: +├ model: LinModel +└ dimensions: + ├ 1 manipulated inputs u + ├ 2 estimated states x̂ + ├ 1 measured outputs ym + ├ 1 unmeasured outputs yu + └ 0 measured disturbances d ``` # Extended Help @@ -355,10 +357,10 @@ end function print_estim_dim(io::IO, estim::InternalModel, n) nu, nd = estim.model.nu, estim.model.nd nx̂, nym, nyu = estim.nx̂, estim.nym, estim.nyu - println(io, "$(lpad(nu, n)) manipulated inputs u") - println(io, "$(lpad(nx̂, n)) estimated states x̂") - println(io, "$(lpad(nym, n)) measured outputs ym") - println(io, "$(lpad(nyu, n)) unmeasured outputs yu") - print(io, "$(lpad(nd, n)) measured disturbances d") + println(io, " ├$(lpad(nu, n)) manipulated inputs u") + println(io, " ├$(lpad(nx̂, n)) estimated states x̂") + println(io, " ├$(lpad(nym, n)) measured outputs ym") + println(io, " ├$(lpad(nyu, n)) unmeasured outputs yu") + print(io, " └$(lpad(nd, n)) measured disturbances d") end diff --git a/src/estimator/kalman.jl b/src/estimator/kalman.jl index cb8e18af3..b7b04315b 100644 --- a/src/estimator/kalman.jl +++ b/src/estimator/kalman.jl @@ -130,12 +130,14 @@ state of the next time step ``\mathbf{x̂}_k(k+1)``. 
This estimator is allocatio julia> model = LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 0.5); julia> estim = SteadyKalmanFilter(model, i_ym=[2], σR=[1], σQint_ym=[0.01]) -SteadyKalmanFilter estimator with a sample time Ts = 0.5 s, LinModel and: - 1 manipulated inputs u (0 integrating states) - 3 estimated states x̂ - 1 measured outputs ym (1 integrating states) - 1 unmeasured outputs yu - 0 measured disturbances d +SteadyKalmanFilter estimator with a sample time Ts = 0.5 s: +├ model: LinModel +└ dimensions: + ├ 1 manipulated inputs u (0 integrating states) + ├ 3 estimated states x̂ + ├ 1 measured outputs ym (1 integrating states) + ├ 1 unmeasured outputs yu + └ 0 measured disturbances d ``` # Extended Help @@ -395,12 +397,14 @@ This estimator is allocation-free. julia> model = LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 0.5); julia> estim = KalmanFilter(model, i_ym=[2], σR=[1], σP_0=[100, 100], σQint_ym=[0.01]) -KalmanFilter estimator with a sample time Ts = 0.5 s, LinModel and: - 1 manipulated inputs u (0 integrating states) - 3 estimated states x̂ - 1 measured outputs ym (1 integrating states) - 1 unmeasured outputs yu - 0 measured disturbances d +KalmanFilter estimator with a sample time Ts = 0.5 s: +├ model: LinModel +└ dimensions: + ├ 1 manipulated inputs u (0 integrating states) + ├ 3 estimated states x̂ + ├ 1 measured outputs ym (1 integrating states) + ├ 1 unmeasured outputs yu + └ 0 measured disturbances d ``` """ function KalmanFilter( @@ -639,12 +643,14 @@ This estimator is allocation-free if `model` simulations do not allocate. 
julia> model = NonLinModel((x,u,_,_)->0.1x+u, (x,_,_)->2x, 10.0, 1, 1, 1, solver=nothing); julia> estim = UnscentedKalmanFilter(model, σR=[1], nint_ym=[2], σPint_ym_0=[1, 1]) -UnscentedKalmanFilter estimator with a sample time Ts = 10.0 s, NonLinModel and: - 1 manipulated inputs u (0 integrating states) - 3 estimated states x̂ - 1 measured outputs ym (2 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +UnscentedKalmanFilter estimator with a sample time Ts = 10.0 s: +├ model: NonLinModel +└ dimensions: + ├ 1 manipulated inputs u (0 integrating states) + ├ 3 estimated states x̂ + ├ 1 measured outputs ym (2 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` # Extended Help @@ -1002,12 +1008,15 @@ differentiation. This estimator is allocation-free if `model` simulations do not julia> model = NonLinModel((x,u,_,_)->0.2x+u, (x,_,_)->-3x, 5.0, 1, 1, 1, solver=nothing); julia> estim = ExtendedKalmanFilter(model, σQ=[2], σQint_ym=[2], σP_0=[0.1], σPint_ym_0=[0.1]) -ExtendedKalmanFilter estimator with a sample time Ts = 5.0 s, NonLinModel and: - 1 manipulated inputs u (0 integrating states) - 2 estimated states x̂ - 1 measured outputs ym (1 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +ExtendedKalmanFilter estimator with a sample time Ts = 5.0 s: +├ model: NonLinModel +├ jacobian: AutoForwardDiff +└ dimensions: + ├ 1 manipulated inputs u (0 integrating states) + ├ 2 estimated states x̂ + ├ 1 measured outputs ym (1 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` """ function ExtendedKalmanFilter( @@ -1169,6 +1178,10 @@ function update_estimate!(estim::ExtendedKalmanFilter{NT}, y0m, d0, u0) where NT return predict_estimate_kf!(estim, u0, d0, F̂) end +function print_details(io::IO, estim::ExtendedKalmanFilter) + println(io, "├ jacobian: $(backend_str(estim.jacobian))") +end + "Set `estim.cov.P̂` to `estim.cov.P̂_0` for the time-varying Kalman Filters." 
function init_estimate_cov!( estim::Union{KalmanFilter, UnscentedKalmanFilter, ExtendedKalmanFilter}, _ , _ , _ diff --git a/src/estimator/luenberger.jl b/src/estimator/luenberger.jl index c63e7f4a1..d07038f78 100644 --- a/src/estimator/luenberger.jl +++ b/src/estimator/luenberger.jl @@ -83,12 +83,14 @@ is allocation-free. julia> model = LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 0.5); julia> estim = Luenberger(model, nint_ym=[1, 1], poles=[0.61, 0.62, 0.63, 0.64]) -Luenberger estimator with a sample time Ts = 0.5 s, LinModel and: - 1 manipulated inputs u (0 integrating states) - 4 estimated states x̂ - 2 measured outputs ym (2 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +Luenberger estimator with a sample time Ts = 0.5 s: +├ model: LinModel +└ dimensions: + ├ 1 manipulated inputs u (0 integrating states) + ├ 4 estimated states x̂ + ├ 2 measured outputs ym (2 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` """ function Luenberger( diff --git a/src/estimator/manual.jl b/src/estimator/manual.jl index bc848385d..c18b11838 100644 --- a/src/estimator/manual.jl +++ b/src/estimator/manual.jl @@ -77,12 +77,14 @@ examples. 
julia> model = LinModel([tf(3, [30, 1]); tf(-2, [5, 1])], 0.5); julia> estim = ManualEstimator(model, nint_ym=0) # disable augmentation with integrators -ManualEstimator estimator with a sample time Ts = 0.5 s, LinModel and: - 1 manipulated inputs u (0 integrating states) - 2 estimated states x̂ - 2 measured outputs ym (0 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +ManualEstimator estimator with a sample time Ts = 0.5 s: +├ model: LinModel +└ dimensions: + ├ 1 manipulated inputs u (0 integrating states) + ├ 2 estimated states x̂ + ├ 2 measured outputs ym (0 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` # Extended Help diff --git a/src/estimator/mhe.jl b/src/estimator/mhe.jl index 6123ce81a..3ae49ea2a 100644 --- a/src/estimator/mhe.jl +++ b/src/estimator/mhe.jl @@ -1,26 +1,31 @@ include("mhe/construct.jl") include("mhe/execute.jl") -function Base.show(io::IO, estim::MovingHorizonEstimator) - nu, nd = estim.model.nu, estim.model.nd - nx̂, nym, nyu = estim.nx̂, estim.nym, estim.nyu - n = maximum(ndigits.((nu, nx̂, nym, nyu, nd))) + 1 - println(io, "$(nameof(typeof(estim))) estimator with a sample time "* - "Ts = $(estim.model.Ts) s, $(JuMP.solver_name(estim.optim)) optimizer, "* - "$(nameof(typeof(estim.model))) and:") - print_estim_dim(io, estim, n) +"Print optimizer and other information for `MovingHorizonEstimator`." +function print_details(io::IO, estim::MovingHorizonEstimator) + println(io, "├ optimizer: $(JuMP.solver_name(estim.optim)) ") + print_backends(io, estim, estim.model) +end + +"Print the differentiation backends for `SimModel`." +function print_backends(io::IO, estim::MovingHorizonEstimator, ::SimModel) + println(io, "├ gradient: $(backend_str(estim.gradient))") + println(io, "├ jacobian: $(backend_str(estim.jacobian))") end +"No differentiation backends to print for `LinModel`." 
+print_backends(::IO, ::MovingHorizonEstimator, ::LinModel) = nothing "Print the overall dimensions of the MHE `estim` with left padding `n`." function print_estim_dim(io::IO, estim::MovingHorizonEstimator, n) nu, nd = estim.model.nu, estim.model.nd nx̂, nym, nyu = estim.nx̂, estim.nym, estim.nyu - He, nϵ = estim.He, estim.nϵ - println(io, "$(lpad(He, n)) estimation steps He") - println(io, "$(lpad(nϵ, n)) slack variable ϵ (estimation constraints)") - println(io, "$(lpad(nu, n)) manipulated inputs u ($(sum(estim.nint_u)) integrating states)") - println(io, "$(lpad(nx̂, n)) estimated states x̂") - println(io, "$(lpad(nym, n)) measured outputs ym ($(sum(estim.nint_ym)) integrating states)") - println(io, "$(lpad(nyu, n)) unmeasured outputs yu") - print(io, "$(lpad(nd, n)) measured disturbances d") + He, nε = estim.He, estim.nε + niu, niym = sum(estim.nint_u), sum(estim.nint_ym) + println(io, " ├$(lpad(He, n)) estimation steps He") + println(io, " ├$(lpad(nε, n)) slack variable ε (estimation constraints)") + println(io, " ├$(lpad(nu, n)) manipulated inputs u ($niu integrating states)") + println(io, " ├$(lpad(nx̂, n)) estimated states x̂") + println(io, " ├$(lpad(nym, n)) measured outputs ym ($niym integrating states)") + println(io, " ├$(lpad(nyu, n)) unmeasured outputs yu") + print(io, " └$(lpad(nd, n)) measured disturbances d") end \ No newline at end of file diff --git a/src/estimator/mhe/construct.jl b/src/estimator/mhe/construct.jl index 4a3c18730..c5e9fd5c8 100644 --- a/src/estimator/mhe/construct.jl +++ b/src/estimator/mhe/construct.jl @@ -10,7 +10,7 @@ The bounds on the estimated state at arrival ``\mathbf{x̂}_k(k-N_k+1)`` is sepa the other state constraints ``\mathbf{x̂}_k(k-N_k+2), \mathbf{x̂}_k(k-N_k+3), ...`` since the former is always a linear inequality constraint (it's a decision variable). The fields `x̃min` and `x̃max` refer to the bounds at the arrival (augmented with the slack variable -ϵ), and `X̂min` and `X̂max`, the others. 
+ε), and `X̂min` and `X̂max`, the others. """ struct EstimatorConstraint{NT<:Real} Ẽx̂ ::Matrix{NT} @@ -68,7 +68,7 @@ struct MovingHorizonEstimator{ f̂op::Vector{NT} x̂0 ::Vector{NT} He::Int - nϵ::Int + nε::Int i_ym::Vector{Int} nx̂ ::Int nym::Int @@ -140,7 +140,7 @@ struct MovingHorizonEstimator{ ) # dummy values (updated just before optimization): F, fx̄, Fx̂ = zeros(NT, nym*He), zeros(NT, nx̂), zeros(NT, nx̂*He) - con, nϵ, Ẽ, ẽx̄ = init_defaultcon_mhe( + con, nε, Ẽ, ẽx̄ = init_defaultcon_mhe( model, He, Cwt, nx̂, nym, E, ex̄, Ex̂, Fx̂, Gx̂, Jx̂, Bx̂ ) nZ̃ = size(Ẽ, 2) @@ -164,7 +164,7 @@ struct MovingHorizonEstimator{ gradient, jacobian, covestim, Z̃, lastu0, x̂op, f̂op, x̂0, - He, nϵ, + He, nε, i_ym, nx̂, nym, nyu, nxs, As, Cs_u, Cs_y, nint_u, nint_ym, Â, B̂u, Ĉ, B̂d, D̂d, Ĉm, D̂dm, @@ -192,10 +192,10 @@ distribution is not approximated like the [`UnscentedKalmanFilter`](@ref). The c costs are drastically higher, however, since it minimizes the following objective function at each discrete time ``k``: ```math -\min_{\mathbf{x̂}_k(k-N_k+p), \mathbf{Ŵ}, ϵ} \mathbf{x̄}' \mathbf{P̄}^{-1} \mathbf{x̄} +\min_{\mathbf{x̂}_k(k-N_k+p), \mathbf{Ŵ}, ε} \mathbf{x̄}' \mathbf{P̄}^{-1} \mathbf{x̄} + \mathbf{Ŵ}' \mathbf{Q̂}_{N_k}^{-1} \mathbf{Ŵ} + \mathbf{V̂}' \mathbf{R̂}_{N_k}^{-1} \mathbf{V̂} - + C ϵ^2 + + C ε^2 ``` in which the arrival costs are evaluated from the states estimated at time ``k-N_k``: ```math @@ -220,7 +220,7 @@ N_k = \begin{cases} The vectors ``\mathbf{Ŵ}`` and ``\mathbf{V̂}`` respectively encompass the estimated process noises ``\mathbf{ŵ}(k-j+p)`` from ``j=N_k`` to ``1`` and sensor noises ``\mathbf{v̂}(k-j+1)`` from ``j=N_k`` to ``1``. The Extended Help defines the two vectors, the slack variable -``ϵ``, and the estimation of the covariance at arrival ``\mathbf{P̂}_{k-N_k}(k-N_k+p)``. If +``ε``, and the estimation of the covariance at arrival ``\mathbf{P̂}_{k-N_k}(k-N_k+p)``. 
If the keyword argument `direct=true` (default value), the constant ``p=0`` in the equations above, and the MHE is in the current form. Else ``p=1``, leading to the prediction form. @@ -275,14 +275,19 @@ transcription for now. julia> model = NonLinModel((x,u,_,_)->0.1x+u, (x,_,_)->2x, 10.0, 1, 1, 1, solver=nothing); julia> estim = MovingHorizonEstimator(model, He=5, σR=[1], σP_0=[0.01]) -MovingHorizonEstimator estimator with a sample time Ts = 10.0 s, Ipopt optimizer, NonLinModel and: - 5 estimation steps He - 0 slack variable ϵ (estimation constraints) - 1 manipulated inputs u (0 integrating states) - 2 estimated states x̂ - 1 measured outputs ym (1 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +MovingHorizonEstimator estimator with a sample time Ts = 10.0 s: +├ model: NonLinModel +├ optimizer: Ipopt +├ gradient: AutoForwardDiff +├ jacobian: AutoForwardDiff +└ dimensions: + ├ 5 estimation steps He + ├ 0 slack variable ε (estimation constraints) + ├ 1 manipulated inputs u (0 integrating states) + ├ 2 estimated states x̂ + ├ 1 measured outputs ym (1 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` # Extended Help @@ -355,12 +360,12 @@ MovingHorizonEstimator estimator with a sample time Ts = 10.0 s, Ipopt optimizer for common mistakes when writing these functions. Also, an [`UnscentedKalmanFilter`](@ref) estimates the arrival covariance by default. - The slack variable ``ϵ`` relaxes the constraints if enabled, see [`setconstraint!`](@ref). + The slack variable ``ε`` relaxes the constraints if enabled, see [`setconstraint!`](@ref). It is disabled by default for the MHE (from `Cwt=Inf`) but it should be activated for problems with two or more types of bounds, to ensure feasibility (e.g. on the estimated state ``\mathbf{x̂}`` and sensor noise ``\mathbf{v̂}``). 
Note that if `Cwt≠Inf`, the attribute `nlp_scaling_max_gradient` of `Ipopt` is set to `10/Cwt` (if not already set), - to scale the small values of ``ϵ``. Use the second constructor to specify the arrival + to scale the small values of ``ε``. Use the second constructor to specify the arrival covariance estimation method. """ function MovingHorizonEstimator( @@ -455,12 +460,12 @@ It supports both soft and hard constraints on the estimated state ``\mathbf{x̂} noise ``\mathbf{ŵ}`` and sensor noise ``\mathbf{v̂}``: ```math \begin{alignat*}{3} - \mathbf{x̂_{min} - c_{x̂_{min}}} ϵ ≤&&\ \mathbf{x̂}_k(k-j+p) &≤ \mathbf{x̂_{max} + c_{x̂_{max}}} ϵ &&\qquad j = N_k, N_k - 1, ... , 0 \\ - \mathbf{ŵ_{min} - c_{ŵ_{min}}} ϵ ≤&&\ \mathbf{ŵ}(k-j+p) &≤ \mathbf{ŵ_{max} + c_{ŵ_{max}}} ϵ &&\qquad j = N_k, N_k - 1, ... , 1 \\ - \mathbf{v̂_{min} - c_{v̂_{min}}} ϵ ≤&&\ \mathbf{v̂}(k-j+1) &≤ \mathbf{v̂_{max} + c_{v̂_{max}}} ϵ &&\qquad j = N_k, N_k - 1, ... , 1 + \mathbf{x̂_{min} - c_{x̂_{min}}} ε ≤&&\ \mathbf{x̂}_k(k-j+p) &≤ \mathbf{x̂_{max} + c_{x̂_{max}}} ε &&\qquad j = N_k, N_k - 1, ... , 0 \\ + \mathbf{ŵ_{min} - c_{ŵ_{min}}} ε ≤&&\ \mathbf{ŵ}(k-j+p) &≤ \mathbf{ŵ_{max} + c_{ŵ_{max}}} ε &&\qquad j = N_k, N_k - 1, ... , 1 \\ + \mathbf{v̂_{min} - c_{v̂_{min}}} ε ≤&&\ \mathbf{v̂}(k-j+1) &≤ \mathbf{v̂_{max} + c_{v̂_{max}}} ε &&\qquad j = N_k, N_k - 1, ... , 1 \end{alignat*} ``` -and also ``ϵ ≥ 0``. All the constraint parameters are vector. Use `±Inf` values when there +and also ``ε ≥ 0``. All the constraint parameters are vector. Use `±Inf` values when there is no bound. The constraint softness parameters ``\mathbf{c}``, also called equal concern for relaxation, are non-negative values that specify the softness of the associated bound. Use `0.0` values for hard constraints (default for all of them). Notice that constraining @@ -490,14 +495,17 @@ the constant ``p``, on model augmentation and on time-varying constraints. 
julia> estim = MovingHorizonEstimator(LinModel(ss(0.5,1,1,0,1)), He=3); julia> estim = setconstraint!(estim, x̂min=[-50, -50], x̂max=[50, 50]) -MovingHorizonEstimator estimator with a sample time Ts = 1.0 s, OSQP optimizer, LinModel and: - 3 estimation steps He - 0 slack variable ϵ (estimation constraints) - 1 manipulated inputs u (0 integrating states) - 2 estimated states x̂ - 1 measured outputs ym (1 integrating states) - 0 unmeasured outputs yu - 0 measured disturbances d +MovingHorizonEstimator estimator with a sample time Ts = 1.0 s: +├ model: LinModel +├ optimizer: OSQP +└ dimensions: + ├ 3 estimation steps He + ├ 0 slack variable ε (estimation constraints) + ├ 1 manipulated inputs u (0 integrating states) + ├ 2 estimated states x̂ + ├ 1 measured outputs ym (1 integrating states) + ├ 0 unmeasured outputs yu + └ 0 measured disturbances d ``` # Extended Help @@ -510,9 +518,9 @@ MovingHorizonEstimator estimator with a sample time Ts = 1.0 s, OSQP optimizer, estimation horizon ``H_e`` are also possible, mathematically defined as: ```math \begin{alignat*}{3} - \mathbf{X̂_{min} - C_{x̂_{min}}} ϵ ≤&&\ \mathbf{X̂} &≤ \mathbf{X̂_{max} + C_{x̂_{max}}} ϵ \\ - \mathbf{Ŵ_{min} - C_{ŵ_{min}}} ϵ ≤&&\ \mathbf{Ŵ} &≤ \mathbf{Ŵ_{max} + C_{ŵ_{max}}} ϵ \\ - \mathbf{V̂_{min} - C_{v̂_{min}}} ϵ ≤&&\ \mathbf{V̂} &≤ \mathbf{V̂_{max} + C_{v̂_{max}}} ϵ + \mathbf{X̂_{min} - C_{x̂_{min}}} ε ≤&&\ \mathbf{X̂} &≤ \mathbf{X̂_{max} + C_{x̂_{max}}} ε \\ + \mathbf{Ŵ_{min} - C_{ŵ_{min}}} ε ≤&&\ \mathbf{Ŵ} &≤ \mathbf{Ŵ_{max} + C_{ŵ_{max}}} ε \\ + \mathbf{V̂_{min} - C_{v̂_{min}}} ε ≤&&\ \mathbf{V̂} &≤ \mathbf{V̂_{max} + C_{v̂_{max}}} ε \end{alignat*} ``` For this, use the same keyword arguments as above but with a first capital letter: @@ -554,7 +562,7 @@ function setconstraint!( C = estim.C if isnothing(X̂min) && !isnothing(x̂min) size(x̂min) == (nx̂,) || throw(ArgumentError("x̂min size must be $((nx̂,))")) - con.x̃0min[end-nx̂+1:end] .= x̂min .- estim.x̂op # if C is finite : x̃ = [ϵ; x̂] + 
con.x̃0min[end-nx̂+1:end] .= x̂min .- estim.x̂op # if C is finite : x̃ = [ε; x̂] for i in 1:nx̂*He con.X̂0min[i] = x̂min[(i-1) % nx̂ + 1] - estim.X̂op[i] end @@ -565,7 +573,7 @@ function setconstraint!( end if isnothing(X̂max) && !isnothing(x̂max) size(x̂max) == (nx̂,) || throw(ArgumentError("x̂max size must be $((nx̂,))")) - con.x̃0max[end-nx̂+1:end] .= x̂max .- estim.x̂op # if C is finite : x̃ = [ϵ; x̂] + con.x̃0max[end-nx̂+1:end] .= x̂max .- estim.x̂op # if C is finite : x̃ = [ε; x̂] for i in 1:nx̂*He con.X̂0max[i] = x̂max[(i-1) % nx̂ + 1] - estim.X̂op[i] end @@ -628,7 +636,7 @@ function setconstraint!( if !isnothing(C_x̂min) size(C_x̂min) == (nX̂con,) || throw(ArgumentError("C_x̂min size must be $((nX̂con,))")) any(C_x̂min .< 0) && error("C_x̂min weights should be non-negative") - # if C is finite : x̃ = [ϵ; x̂] + # if C is finite : x̃ = [ε; x̂] con.A_x̃min[end-nx̂+1:end, end] .= @views -C_x̂min[1:nx̂] con.C_x̂min .= @views C_x̂min[nx̂+1:end] size(con.A_X̂min, 1) ≠ 0 && (con.A_X̂min[:, end] = -con.C_x̂min) # for LinModel @@ -636,7 +644,7 @@ function setconstraint!( if !isnothing(C_x̂max) size(C_x̂max) == (nX̂con,) || throw(ArgumentError("C_x̂max size must be $((nX̂con,))")) any(C_x̂max .< 0) && error("C_x̂max weights should be non-negative") - # if C is finite : x̃ = [ϵ; x̂] : + # if C is finite : x̃ = [ε; x̂] : con.A_x̃max[end-nx̂+1:end, end] .= @views -C_x̂max[1:nx̂] con.C_x̂max .= @views C_x̂max[nx̂+1:end] size(con.A_X̂max, 1) ≠ 0 && (con.A_X̂max[:, end] = -con.C_x̂max) # for LinModel @@ -755,7 +763,7 @@ function init_defaultcon_mhe( ) where {NT<:Real} nŵ = nx̂ nZ̃, nX̂, nŴ, nYm = nx̂+nŵ*He, nx̂*He, nŵ*He, nym*He - nϵ = isinf(C) ? 0 : 1 + nε = isinf(C) ? 
0 : 1 x̂min, x̂max = fill(convert(NT,-Inf), nx̂), fill(convert(NT,+Inf), nx̂) X̂min, X̂max = fill(convert(NT,-Inf), nX̂), fill(convert(NT,+Inf), nX̂) Ŵmin, Ŵmax = fill(convert(NT,-Inf), nŴ), fill(convert(NT,+Inf), nŴ) @@ -764,10 +772,10 @@ function init_defaultcon_mhe( C_x̂min, C_x̂max = fill(0.0, nX̂), fill(0.0, nX̂) C_ŵmin, C_ŵmax = fill(0.0, nŴ), fill(0.0, nŴ) C_v̂min, C_v̂max = fill(0.0, nYm), fill(0.0, nYm) - A_x̃min, A_x̃max, x̃min, x̃max, ẽx̄ = relaxarrival(model, nϵ, c_x̂min, c_x̂max, x̂min, x̂max, ex̄) - A_X̂min, A_X̂max, Ẽx̂ = relaxX̂(model, nϵ, C_x̂min, C_x̂max, Ex̂) - A_Ŵmin, A_Ŵmax = relaxŴ(model, nϵ, C_ŵmin, C_ŵmax, nx̂) - A_V̂min, A_V̂max, Ẽ = relaxV̂(model, nϵ, C_v̂min, C_v̂max, E) + A_x̃min, A_x̃max, x̃min, x̃max, ẽx̄ = relaxarrival(model, nε, c_x̂min, c_x̂max, x̂min, x̂max, ex̄) + A_X̂min, A_X̂max, Ẽx̂ = relaxX̂(model, nε, C_x̂min, C_x̂max, Ex̂) + A_Ŵmin, A_Ŵmax = relaxŴ(model, nε, C_ŵmin, C_ŵmax, nx̂) + A_V̂min, A_V̂max, Ẽ = relaxV̂(model, nε, C_v̂min, C_v̂max, E) i_x̃min, i_x̃max = .!isinf.(x̃min), .!isinf.(x̃max) i_X̂min, i_X̂max = .!isinf.(X̂min), .!isinf.(X̂max) i_Ŵmin, i_Ŵmax = .!isinf.(Ŵmin), .!isinf.(Ŵmax) @@ -785,18 +793,18 @@ function init_defaultcon_mhe( C_x̂min, C_x̂max, C_v̂min, C_v̂max, i_b, i_g ) - return con, nϵ, Ẽ, ẽx̄ + return con, nε, Ẽ, ẽx̄ end @doc raw""" relaxarrival( - model::SimModel, nϵ, c_x̂min, c_x̂max, x̂min, x̂max, ex̄ + model::SimModel, nε, c_x̂min, c_x̂max, x̂min, x̂max, ex̄ ) -> A_x̃min, A_x̃max, x̃min, x̃max, ẽx̄ -Augment arrival state constraints with slack variable ϵ for softening the MHE. +Augment arrival state constraints with slack variable ε for softening the MHE. 
Denoting the MHE decision variable augmented with the slack variable ``\mathbf{Z̃} = -[\begin{smallmatrix} ϵ \\ \mathbf{Z} \end{smallmatrix}]``, it returns the ``\mathbf{ẽ_x̄}`` +[\begin{smallmatrix} ε \\ \mathbf{Z} \end{smallmatrix}]``, it returns the ``\mathbf{ẽ_x̄}`` matrix that appears in the estimation error at arrival equation ``\mathbf{x̄} = \mathbf{ẽ_x̄ Z̃ + f_x̄}``. It also returns the augmented constraints ``\mathbf{x̃_{min}}`` and ``\mathbf{x̃_{max}}``, and the ``\mathbf{A}`` matrices for the inequality constraints: @@ -815,14 +823,14 @@ in which ``\mathbf{x̃_{max}} = [\begin{smallmatrix} ∞ \\ \mathbf{x̂_{max}} \end{smallmatrix}]`` and ``\mathbf{x̃_{op}} = [\begin{smallmatrix} 0 \\ \mathbf{x̂_{op}} \end{smallmatrix}]`` """ -function relaxarrival(::SimModel{NT}, nϵ, c_x̂min, c_x̂max, x̂min, x̂max, ex̄) where {NT<:Real} +function relaxarrival(::SimModel{NT}, nε, c_x̂min, c_x̂max, x̂min, x̂max, ex̄) where {NT<:Real} ex̂ = -ex̄ - if nϵ ≠ 0 # Z̃ = [ϵ; Z] + if nε ≠ 0 # Z̃ = [ε; Z] x̃min, x̃max = [NT[0.0]; x̂min], [NT[Inf]; x̂max] - A_ϵ = [NT[1.0] zeros(NT, 1, size(ex̂, 2))] - # ϵ impacts arrival state constraint calculations: - A_x̃min, A_x̃max = -[A_ϵ; c_x̂min ex̂], [A_ϵ; -c_x̂max ex̂] - # ϵ has no impact on estimation error at arrival: + A_ε = [NT[1.0] zeros(NT, 1, size(ex̂, 2))] + # ε impacts arrival state constraint calculations: + A_x̃min, A_x̃max = -[A_ε; c_x̂min ex̂], [A_ε; -c_x̂max ex̂] + # ε has no impact on estimation error at arrival: ẽx̄ = [zeros(NT, size(ex̄, 1), 1) ex̄] else # Z̃ = Z (only hard constraints) x̃min, x̃max = x̂min, x̂max @@ -833,12 +841,12 @@ function relaxarrival(::SimModel{NT}, nϵ, c_x̂min, c_x̂max, x̂min, x̂max, e end @doc raw""" - relaxX̂(model::SimModel, nϵ, C_x̂min, C_x̂max, Ex̂) -> A_X̂min, A_X̂max, Ẽx̂ + relaxX̂(model::SimModel, nε, C_x̂min, C_x̂max, Ex̂) -> A_X̂min, A_X̂max, Ẽx̂ -Augment estimated state constraints with slack variable ϵ for softening the MHE. 
+Augment estimated state constraints with slack variable ε for softening the MHE. Denoting the MHE decision variable augmented with the slack variable ``\mathbf{Z̃} = -[\begin{smallmatrix} ϵ \\ \mathbf{Z} \end{smallmatrix}]``, it returns the ``\mathbf{Ẽ_x̂}`` +[\begin{smallmatrix} ε \\ \mathbf{Z} \end{smallmatrix}]``, it returns the ``\mathbf{Ẽ_x̂}`` matrix that appears in estimated states equation ``\mathbf{X̂} = \mathbf{Ẽ_x̂ Z̃ + F_x̂}``. It also returns the ``\mathbf{A}`` matrices for the inequality constraints: ```math @@ -854,11 +862,11 @@ also returns the ``\mathbf{A}`` matrices for the inequality constraints: in which ``\mathbf{X̂_{min}, X̂_{max}}`` and ``\mathbf{X̂_{op}}`` vectors respectively contains ``\mathbf{x̂_{min}, x̂_{max}}`` and ``\mathbf{x̂_{op}}`` repeated ``H_e`` times. """ -function relaxX̂(::LinModel{NT}, nϵ, C_x̂min, C_x̂max, Ex̂) where {NT<:Real} - if nϵ ≠ 0 # Z̃ = [ϵ; Z] - # ϵ impacts estimated process noise constraint calculations: +function relaxX̂(::LinModel{NT}, nε, C_x̂min, C_x̂max, Ex̂) where {NT<:Real} + if nε ≠ 0 # Z̃ = [ε; Z] + # ε impacts estimated process noise constraint calculations: A_X̂min, A_X̂max = -[C_x̂min Ex̂], [-C_x̂max Ex̂] - # ϵ has no impact on estimated process noises: + # ε has no impact on estimated process noises: Ẽx̂ = [zeros(NT, size(Ex̂, 1), 1) Ex̂] else # Z̃ = Z (only hard constraints) Ẽx̂ = Ex̂ @@ -868,19 +876,19 @@ function relaxX̂(::LinModel{NT}, nϵ, C_x̂min, C_x̂max, Ex̂) where {NT<:Real end "Return empty matrices if model is not a [`LinModel`](@ref)" -function relaxX̂(::SimModel{NT}, nϵ, C_x̂min, C_x̂max, Ex̂) where {NT<:Real} - Ẽx̂ = [zeros(NT, 0, nϵ) Ex̂] +function relaxX̂(::SimModel{NT}, nε, C_x̂min, C_x̂max, Ex̂) where {NT<:Real} + Ẽx̂ = [zeros(NT, 0, nε) Ex̂] A_X̂min, A_X̂max = -Ẽx̂, Ẽx̂ return A_X̂min, A_X̂max, Ẽx̂ end @doc raw""" - relaxŴ(model::SimModel, nϵ, C_ŵmin, C_ŵmax, nx̂) -> A_Ŵmin, A_Ŵmax + relaxŴ(model::SimModel, nε, C_ŵmin, C_ŵmax, nx̂) -> A_Ŵmin, A_Ŵmax -Augment estimated process noise 
constraints with slack variable ϵ for softening the MHE. +Augment estimated process noise constraints with slack variable ε for softening the MHE. Denoting the MHE decision variable augmented with the slack variable ``\mathbf{Z̃} = -[\begin{smallmatrix} ϵ \\ \mathbf{Z} \end{smallmatrix}]``, it returns the ``\mathbf{A}`` +[\begin{smallmatrix} ε \\ \mathbf{Z} \end{smallmatrix}]``, it returns the ``\mathbf{A}`` matrices for the inequality constraints: ```math \begin{bmatrix} @@ -893,9 +901,9 @@ matrices for the inequality constraints: \end{bmatrix} ``` """ -function relaxŴ(::SimModel{NT}, nϵ, C_ŵmin, C_ŵmax, nx̂) where {NT<:Real} +function relaxŴ(::SimModel{NT}, nε, C_ŵmin, C_ŵmax, nx̂) where {NT<:Real} A = [zeros(NT, length(C_ŵmin), nx̂) I] - if nϵ ≠ 0 # Z̃ = [ϵ; Z] + if nε ≠ 0 # Z̃ = [ε; Z] A_Ŵmin, A_Ŵmax = -[C_ŵmin A], [-C_ŵmax A] else # Z̃ = Z (only hard constraints) A_Ŵmin, A_Ŵmax = -A, A @@ -904,12 +912,12 @@ function relaxŴ(::SimModel{NT}, nϵ, C_ŵmin, C_ŵmax, nx̂) where {NT<:Real end @doc raw""" - relaxV̂(model::SimModel, nϵ, C_v̂min, C_v̂max, E) -> A_V̂min, A_V̂max, Ẽ + relaxV̂(model::SimModel, nε, C_v̂min, C_v̂max, E) -> A_V̂min, A_V̂max, Ẽ -Augment estimated sensor noise constraints with slack variable ϵ for softening the MHE. +Augment estimated sensor noise constraints with slack variable ε for softening the MHE. Denoting the MHE decision variable augmented with the slack variable ``\mathbf{Z̃} = -[\begin{smallmatrix} ϵ \\ \mathbf{Z} \end{smallmatrix}]``, it returns the ``\mathbf{Ẽ}`` +[\begin{smallmatrix} ε \\ \mathbf{Z} \end{smallmatrix}]``, it returns the ``\mathbf{Ẽ}`` matrix that appears in estimated sensor noise equation ``\mathbf{V̂} = \mathbf{Ẽ Z̃ + F}``. 
It also returns the ``\mathbf{A}`` matrices for the inequality constraints: ```math @@ -923,11 +931,11 @@ also returns the ``\mathbf{A}`` matrices for the inequality constraints: \end{bmatrix} ``` """ -function relaxV̂(::LinModel{NT}, nϵ, C_v̂min, C_v̂max, E) where {NT<:Real} - if nϵ ≠ 0 # Z̃ = [ϵ; Z] - # ϵ impacts estimated sensor noise constraint calculations: +function relaxV̂(::LinModel{NT}, nε, C_v̂min, C_v̂max, E) where {NT<:Real} + if nε ≠ 0 # Z̃ = [ε; Z] + # ε impacts estimated sensor noise constraint calculations: A_V̂min, A_V̂max = -[C_v̂min E], [-C_v̂max E] - # ϵ has no impact on estimated sensor noises: + # ε has no impact on estimated sensor noises: Ẽ = [zeros(NT, size(E, 1), 1) E] else # Z̃ = Z (only hard constraints) Ẽ = E @@ -937,8 +945,8 @@ function relaxV̂(::LinModel{NT}, nϵ, C_v̂min, C_v̂max, E) where {NT<:Real} end "Return empty matrices if model is not a [`LinModel`](@ref)" -function relaxV̂(::SimModel{NT}, nϵ, C_v̂min, C_v̂max, E) where {NT<:Real} - Ẽ = [zeros(NT, 0, nϵ) E] +function relaxV̂(::SimModel{NT}, nε, C_v̂min, C_v̂max, E) where {NT<:Real} + Ẽ = [zeros(NT, 0, nε) E] A_V̂min, A_V̂max = -Ẽ, Ẽ return A_V̂min, A_V̂max, Ẽ end @@ -1320,7 +1328,7 @@ function get_optim_functions( # ----------- common cache for Jfunc and gfuncs -------------------------------------- model, con = estim.model, estim.con grad, jac = estim.gradient, estim.jacobian - nx̂, nym, nŷ, nu, nϵ, nk = estim.nx̂, estim.nym, model.ny, model.nu, estim.nϵ, model.nk + nx̂, nym, nŷ, nu, nε, nk = estim.nx̂, estim.nym, model.ny, model.nu, estim.nε, model.nk He = estim.He nV̂, nX̂, ng, nZ̃ = He*nym, He*nx̂, length(con.i_g), length(estim.Z̃) strict = Val(true) diff --git a/src/estimator/mhe/execute.jl b/src/estimator/mhe/execute.jl index 414b1c86a..fca3645ee 100644 --- a/src/estimator/mhe/execute.jl +++ b/src/estimator/mhe/execute.jl @@ -79,7 +79,7 @@ following fields: Fields with *`emphasis`* are non-Unicode alternatives. 
- `:Ŵ` or *`:What`* : optimal estimated process noise over ``N_k``, ``\mathbf{Ŵ}`` -- `:ϵ` or *`:epsilon`* : optimal slack variable, ``ϵ`` +- `:ε` or *`:epsilon`* : optimal slack variable, ``ε`` - `:X̂` or *`:Xhat`* : optimal estimated states over ``N_k+1``, ``\mathbf{X̂}`` - `:x̂` or *`:xhat`* : optimal estimated state, ``\mathbf{x̂}_k(k+p)`` - `:V̂` or *`:Vhat`* : optimal estimated sensor noise over ``N_k``, ``\mathbf{V̂}`` @@ -110,8 +110,8 @@ julia> round.(getinfo(estim)[:Ŷ], digits=3) function getinfo(estim::MovingHorizonEstimator{NT}) where NT<:Real model, buffer, Nk = estim.model, estim.buffer, estim.Nk[] nu, ny, nd = model.nu, model.ny, model.nd - nx̂, nym, nŵ, nϵ = estim.nx̂, estim.nym, estim.nx̂, estim.nϵ - nx̃ = nϵ + nx̂ + nx̂, nym, nŵ, nε = estim.nx̂, estim.nym, estim.nx̂, estim.nε + nx̃ = nε + nx̂ info = Dict{Symbol, Any}() V̂, X̂0 = similar(estim.Y0m[1:nym*Nk]), similar(estim.X̂0[1:nx̂*Nk]) û0, k0, ŷ0 = buffer.û, buffer.k, buffer.ŷ @@ -137,7 +137,7 @@ function getinfo(estim::MovingHorizonEstimator{NT}) where NT<:Real end info[:Ŵ] = estim.Ŵ[1:Nk*nŵ] info[:x̂arr] = x̂0arr + estim.x̂op - info[:ϵ] = nϵ ≠ 0 ? estim.Z̃[begin] : zero(NT) + info[:ε] = nε ≠ 0 ? 
estim.Z̃[begin] : zero(NT) info[:J] = obj_nonlinprog!(x̄, estim, estim.model, V̂, estim.Z̃) info[:X̂] = X̂0 .+ @views [estim.x̂op; estim.X̂op[1:nx̂*Nk]] info[:x̂] = estim.x̂0 .+ estim.x̂op @@ -153,7 +153,7 @@ function getinfo(estim::MovingHorizonEstimator{NT}) where NT<:Real # --- non-Unicode fields --- info[:What] = info[:Ŵ] info[:xhatarr] = info[:x̂arr] - info[:epsilon] = info[:ϵ] + info[:epsilon] = info[:ε] info[:Xhat] = info[:X̂] info[:xhat] = info[:x̂] info[:Vhat] = info[:V̂] @@ -161,16 +161,18 @@ function getinfo(estim::MovingHorizonEstimator{NT}) where NT<:Real info[:xbar] = info[:x̄] info[:Yhat] = info[:Ŷ] info[:Yhatm] = info[:Ŷm] + # --- deprecated fields --- + info[:ϵ] = info[:ε] return info end """ - getϵ(estim::MovingHorizonEstimator, Z̃) -> ϵ + getε(estim::MovingHorizonEstimator, Z̃) -> ε -Get the slack `ϵ` from the decision vector `Z̃` if present, otherwise return 0. +Get the slack `ε` from the decision vector `Z̃` if present, otherwise return 0. """ -function getϵ(estim::MovingHorizonEstimator, Z̃::AbstractVector{NT}) where NT<:Real - return estim.nϵ ≠ 0 ? Z̃[begin] : zero(NT) +function getε(estim::MovingHorizonEstimator, Z̃::AbstractVector{NT}) where NT<:Real + return estim.nε ≠ 0 ? Z̃[begin] : zero(NT) end """ @@ -230,7 +232,7 @@ also inits `estim.optim` objective function, expressed as the quadratic general ```math J = \min_{\mathbf{Z̃}} \frac{1}{2}\mathbf{Z̃' H̃ Z̃} + \mathbf{q̃' Z̃} + r ``` -in which ``\mathbf{Z̃} = [\begin{smallmatrix} ϵ \\ \mathbf{Z} \end{smallmatrix}]``. Note that +in which ``\mathbf{Z̃} = [\begin{smallmatrix} ε \\ \mathbf{Z} \end{smallmatrix}]``. Note that ``r`` is useless at optimization but required to evaluate the objective minima ``J``. The Hessian ``\mathbf{H̃}`` matrix of the quadratic general form is not constant here because of the time-varying ``\mathbf{P̄}`` covariance . The computed variables are: @@ -251,9 +253,9 @@ of the time-varying ``\mathbf{P̄}`` covariance . 
The computed variables are: function initpred!(estim::MovingHorizonEstimator, model::LinModel) invP̄, invQ̂_He, invR̂_He = estim.cov.invP̄, estim.cov.invQ̂_He, estim.cov.invR̂_He F, C, optim = estim.F, estim.C, estim.optim - nx̂, nŵ, nym, nϵ, Nk = estim.nx̂, estim.nx̂, estim.nym, estim.nϵ, estim.Nk[] + nx̂, nŵ, nym, nε, Nk = estim.nx̂, estim.nx̂, estim.nym, estim.nε, estim.Nk[] nYm, nŴ = nym*Nk, nŵ*Nk - nZ̃ = nϵ + nx̂ + nŴ + nZ̃ = nε + nx̂ + nŴ # --- update F and fx̄ vectors for MHE predictions --- F .= estim.Y0m .+ estim.B mul!(F, estim.G, estim.U0, 1, 1) @@ -266,7 +268,7 @@ function initpred!(estim::MovingHorizonEstimator, model::LinModel) FZ̃ = @views [estim.fx̄; estim.F[1:nYm]] invQ̂_Nk, invR̂_Nk = @views invQ̂_He[1:nŴ, 1:nŴ], invR̂_He[1:nYm, 1:nYm] M_Nk = [invP̄ zeros(nx̂, nYm); zeros(nYm, nx̂) invR̂_Nk] - Ñ_Nk = [fill(C, nϵ, nϵ) zeros(nϵ, nx̂+nŴ); zeros(nx̂, nϵ+nx̂+nŴ); zeros(nŴ, nϵ+nx̂) invQ̂_Nk] + Ñ_Nk = [fill(C, nε, nε) zeros(nε, nx̂+nŴ); zeros(nx̂, nε+nx̂+nŴ); zeros(nŴ, nε+nx̂) invQ̂_Nk] M_Nk_ẼZ̃ = M_Nk*ẼZ̃ @views mul!(estim.q̃[1:nZ̃], M_Nk_ẼZ̃', FZ̃) @views lmul!(2, estim.q̃[1:nZ̃]) @@ -374,8 +376,8 @@ If first warm-starts the solver with [`set_warmstart_mhe!`](@ref). 
It then calls """ function optim_objective!(estim::MovingHorizonEstimator{NT}) where NT<:Real model, optim, buffer = estim.model, estim.optim, estim.buffer - nym, nx̂, nŵ, nϵ, Nk = estim.nym, estim.nx̂, estim.nx̂, estim.nϵ, estim.Nk[] - nx̃ = nϵ + nx̂ + nym, nx̂, nŵ, nε, Nk = estim.nym, estim.nx̂, estim.nx̂, estim.nε, estim.Nk[] + nx̃ = nε + nx̂ Z̃var::Vector{JuMP.VariableRef} = optim[:Z̃var] V̂ = Vector{NT}(undef, nym*Nk) # TODO: remove this allocation X̂0 = Vector{NT}(undef, nx̂*Nk) # TODO: remove this allocation @@ -433,7 +435,7 @@ If supported by `estim.optim`, it warm-starts the solver at: ```math \mathbf{Z̃_s} = \begin{bmatrix} - ϵ_{k-1} \\ + ε_{k-1} \\ \mathbf{x̂}_{k-1}(k-N_k+p) \\ \mathbf{ŵ}_{k-1}(k-N_k+p+0) \\ \mathbf{ŵ}_{k-1}(k-N_k+p+1) \\ @@ -442,7 +444,7 @@ If supported by `estim.optim`, it warm-starts the solver at: \mathbf{0} \\ \end{bmatrix} ``` -where ``ϵ(k-1)``, ``\mathbf{x̂}_{k-1}(k-N_k+p)`` and ``\mathbf{ŵ}_{k-1}(k-j)`` are +where ``ε(k-1)``, ``\mathbf{x̂}_{k-1}(k-N_k+p)`` and ``\mathbf{ŵ}_{k-1}(k-j)`` are respectively the slack variable, the arrival state estimate and the process noise estimates computed at the last time step ``k-1``. If the objective function is not finite at this point, all the process noises ``\mathbf{ŵ}_{k-1}(k-j)`` are warm-started at zeros. The @@ -450,14 +452,14 @@ method mutates all the arguments. 
""" function set_warmstart_mhe!(V̂, X̂0, estim::MovingHorizonEstimator{NT}, Z̃var) where NT<:Real model, buffer = estim.model, estim.buffer - nϵ, nx̂, nŵ, nZ̃, Nk = estim.nϵ, estim.nx̂, estim.nx̂, length(estim.Z̃), estim.Nk[] - nx̃ = nϵ + nx̂ + nε, nx̂, nŵ, nZ̃, Nk = estim.nε, estim.nx̂, estim.nx̂, length(estim.Z̃), estim.Nk[] + nx̃ = nε + nx̂ Z̃s = Vector{NT}(undef, nZ̃) # TODO: remove this allocation û0, ŷ0, x̄, k0 = buffer.û, buffer.ŷ, buffer.x̂, buffer.k - # --- slack variable ϵ --- - estim.nϵ == 1 && (Z̃s[begin] = estim.Z̃[begin]) + # --- slack variable ε --- + estim.nε == 1 && (Z̃s[begin] = estim.Z̃[begin]) # --- arrival state estimate x̂0arr --- - Z̃s[nϵ+1:nx̃] = estim.x̂0arr_old + Z̃s[nε+1:nx̃] = estim.x̂0arr_old # --- process noise estimates Ŵ --- Z̃s[nx̃+1:end] = estim.Ŵ # verify definiteness of objective function: @@ -568,14 +570,14 @@ The function `dot(x, A, x)` is a performant way of calculating `x'*A*x`. This me function obj_nonlinprog!( x̄, estim::MovingHorizonEstimator, ::SimModel, V̂, Z̃::AbstractVector{NT} ) where NT<:Real - nϵ, Nk = estim.nϵ, estim.Nk[] + nε, Nk = estim.nε, estim.Nk[] nYm, nŴ, nx̂, invP̄ = Nk*estim.nym, Nk*estim.nx̂, estim.nx̂, estim.cov.invP̄ - nx̃ = nϵ + nx̂ + nx̃ = nε + nx̂ invQ̂_Nk, invR̂_Nk = @views estim.cov.invQ̂_He[1:nŴ, 1:nŴ], estim.cov.invR̂_He[1:nYm, 1:nYm] x̂0arr, Ŵ, V̂ = @views Z̃[nx̃-nx̂+1:nx̃], Z̃[nx̃+1:nx̃+nŴ], V̂[1:nYm] x̄ .= estim.x̂0arr_old .- x̂0arr - Jϵ = nϵ ≠ 0 ? estim.C*Z̃[begin]^2 : zero(NT) - return dot(x̄, invP̄, x̄) + dot(Ŵ, invQ̂_Nk, Ŵ) + dot(V̂, invR̂_Nk, V̂) + Jϵ + Jε = nε ≠ 0 ? estim.C*Z̃[begin]^2 : zero(NT) + return dot(x̄, invP̄, x̄) + dot(Ŵ, invQ̂_Nk, Ŵ) + dot(V̂, invR̂_Nk, V̂) + Jε end @doc raw""" @@ -594,9 +596,9 @@ noises from ``k-N_k+1`` to ``k``. 
The `X̂0` vector is estimated states from ``k ``` """ function predict_mhe!(V̂, X̂0, _ , _ , _ , estim::MovingHorizonEstimator, ::LinModel, Z̃) - nϵ, Nk = estim.nϵ, estim.Nk[] + nε, Nk = estim.nε, estim.Nk[] nX̂, nŴ, nYm = estim.nx̂*Nk, estim.nx̂*Nk, estim.nym*Nk - nZ̃ = nϵ + estim.nx̂ + nŴ + nZ̃ = nε + estim.nx̂ + nŴ V̂[1:nYm] .= @views estim.Ẽ[1:nYm, 1:nZ̃]*Z̃[1:nZ̃] + estim.F[1:nYm] X̂0[1:nX̂] .= @views estim.con.Ẽx̂[1:nX̂, 1:nZ̃]*Z̃[1:nZ̃] + estim.con.Fx̂[1:nX̂] return V̂, X̂0 @@ -612,9 +614,9 @@ The function mutates `V̂`, `X̂0`, `û0` and `ŷ0` vector arguments. The augm and by adding the estimated process noise ``\mathbf{ŵ}``. """ function predict_mhe!(V̂, X̂0, û0, k0, ŷ0, estim::MovingHorizonEstimator, model::SimModel, Z̃) - nϵ, Nk = estim.nϵ, estim.Nk[] + nε, Nk = estim.nε, estim.Nk[] nu, nd, nx̂, nŵ, nym = model.nu, model.nd, estim.nx̂, estim.nx̂, estim.nym - nx̃ = nϵ + nx̂ + nx̃ = nε + nx̂ x̂0 = @views Z̃[nx̃-nx̂+1:nx̃] if estim.direct ŷ0next = ŷ0 @@ -661,17 +663,17 @@ The method mutates all the arguments before `estim` argument. function update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim::MovingHorizonEstimator, Z̃) model = estim.model V̂, X̂0 = predict_mhe!(V̂, X̂0, û0, k0, ŷ0, estim, model, Z̃) - ϵ = getϵ(estim, Z̃) - g = con_nonlinprog_mhe!(g, estim, model, X̂0, V̂, ϵ) + ε = getε(estim, Z̃) + g = con_nonlinprog_mhe!(g, estim, model, X̂0, V̂, ε) return nothing end """ - con_nonlinprog_mhe!(g, estim::MovingHorizonEstimator, model::SimModel, X̂0, V̂, ϵ) -> g + con_nonlinprog_mhe!(g, estim::MovingHorizonEstimator, model::SimModel, X̂0, V̂, ε) -> g Compute nonlinear constrains `g` in-place for [`MovingHorizonEstimator`](@ref). 
""" -function con_nonlinprog_mhe!(g, estim::MovingHorizonEstimator, ::SimModel, X̂0, V̂, ϵ) +function con_nonlinprog_mhe!(g, estim::MovingHorizonEstimator, ::SimModel, X̂0, V̂, ε) nX̂con, nX̂ = length(estim.con.X̂0min), estim.nx̂ *estim.Nk[] nV̂con, nV̂ = length(estim.con.V̂min), estim.nym*estim.Nk[] for i in eachindex(g) @@ -679,19 +681,19 @@ function con_nonlinprog_mhe!(g, estim::MovingHorizonEstimator, ::SimModel, X̂0, if i ≤ nX̂con j = i jcon = nX̂con-nX̂+j - g[i] = j > nX̂ ? 0 : estim.con.X̂0min[jcon] - X̂0[j] - ϵ*estim.con.C_x̂min[jcon] + g[i] = j > nX̂ ? 0 : estim.con.X̂0min[jcon] - X̂0[j] - ε*estim.con.C_x̂min[jcon] elseif i ≤ 2nX̂con j = i - nX̂con jcon = nX̂con-nX̂+j - g[i] = j > nX̂ ? 0 : X̂0[j] - estim.con.X̂0max[jcon] - ϵ*estim.con.C_x̂max[jcon] + g[i] = j > nX̂ ? 0 : X̂0[j] - estim.con.X̂0max[jcon] - ε*estim.con.C_x̂max[jcon] elseif i ≤ 2nX̂con + nV̂con j = i - 2nX̂con jcon = nV̂con-nV̂+j - g[i] = j > nV̂ ? 0 : estim.con.V̂min[jcon] - V̂[j] - ϵ*estim.con.C_v̂min[jcon] + g[i] = j > nV̂ ? 0 : estim.con.V̂min[jcon] - V̂[j] - ε*estim.con.C_v̂min[jcon] else j = i - 2nX̂con - nV̂con jcon = nV̂con-nV̂+j - g[i] = j > nV̂ ? 0 : V̂[j] - estim.con.V̂max[jcon] - ϵ*estim.con.C_v̂max[jcon] + g[i] = j > nV̂ ? 
0 : V̂[j] - estim.con.V̂max[jcon] - ε*estim.con.C_v̂max[jcon] end end return g @@ -711,7 +713,7 @@ function setmodel_estimator!( estim::MovingHorizonEstimator, model, uop_old, yop_old, dop_old, Q̂, R̂ ) con = estim.con - nx̂, nym, nu, nd, He, nϵ = estim.nx̂, estim.nym, model.nu, model.nd, estim.He, estim.nϵ + nx̂, nym, nu, nd, He, nε = estim.nx̂, estim.nym, model.nu, model.nd, estim.He, estim.nε As, Cs_u, Cs_y = estim.As, estim.Cs_u, estim.Cs_y Â, B̂u, Ĉ, B̂d, D̂d, x̂op, f̂op = augment_model(model, As, Cs_u, Cs_y, verify_obsv=false) # --- update augmented state-space matrices --- @@ -735,8 +737,8 @@ function setmodel_estimator!( estim.Â, estim.B̂u, estim.Ĉm, estim.B̂d, estim.D̂dm, estim.x̂op, estim.f̂op, p ) - A_X̂min, A_X̂max, Ẽx̂ = relaxX̂(model, nϵ, con.C_x̂min, con.C_x̂max, Ex̂) - A_V̂min, A_V̂max, Ẽ = relaxV̂(model, nϵ, con.C_v̂min, con.C_v̂max, E) + A_X̂min, A_X̂max, Ẽx̂ = relaxX̂(model, nε, con.C_x̂min, con.C_x̂max, Ex̂) + A_V̂min, A_V̂max, Ẽ = relaxV̂(model, nε, con.C_v̂min, con.C_v̂max, E) estim.Ẽ .= Ẽ estim.G .= G estim.J .= J @@ -801,10 +803,10 @@ function setmodel_estimator!( estim.D0[(1+nd*(i-1)):(nd*i)] .-= model.dop end estim.lastu0 .+= uop_old - estim.Z̃[nϵ+1:nϵ+nx̂] .+= x̂op_old + estim.Z̃[nε+1:nε+nx̂] .+= x̂op_old estim.x̂0arr_old .+= x̂op_old estim.lastu0 .-= model.uop - estim.Z̃[nϵ+1:nϵ+nx̂] .-= x̂op + estim.Z̃[nε+1:nε+nx̂] .-= x̂op estim.x̂0arr_old .-= x̂op # --- covariance matrices --- if !isnothing(Q̂) diff --git a/src/general.jl b/src/general.jl index f5e8ecb95..cf059257b 100644 --- a/src/general.jl +++ b/src/general.jl @@ -61,6 +61,14 @@ end init_diffmat(T, backend::AbstractADType, _ , nx , ny) = Matrix{T}(undef, ny, nx) init_diffmat(T, backend::AutoSparse ,prep , _ , _ ) = similar(sparsity_pattern(prep), T) +backend_str(backend::AbstractADType) = string(nameof(typeof(backend))) +function backend_str(backend::AutoSparse) + str = "AutoSparse ($(nameof(typeof(backend.dense_ad))),"* + " $(nameof(typeof(backend.sparsity_detector))),"* + " 
$(nameof(typeof(backend.coloring_algorithm))))" + return str +end + "Verify that x and y elements are different using `!==`." isdifferent(x, y) = any(xi !== yi for (xi, yi) in zip(x, y)) diff --git a/src/model/linmodel.jl b/src/model/linmodel.jl index a6fe34c39..18ddc21a8 100644 --- a/src/model/linmodel.jl +++ b/src/model/linmodel.jl @@ -91,18 +91,20 @@ See also [`ss`](@extref ControlSystemsBase.ss) # Examples ```jldoctest julia> model1 = LinModel(ss(-0.1, 1.0, 1.0, 0), 2.0) # continuous-time StateSpace -LinModel with a sample time Ts = 2.0 s and: - 1 manipulated inputs u - 1 states x - 1 outputs y - 0 measured disturbances d +LinModel with a sample time Ts = 2.0 s: +└ dimensions: + ├ 1 manipulated inputs u + ├ 1 states x + ├ 1 outputs y + └ 0 measured disturbances d julia> model2 = LinModel(ss(0.4, 0.2, 0.3, 0, 0.1)) # discrete-time StateSpace -LinModel with a sample time Ts = 0.1 s and: - 1 manipulated inputs u - 1 states x - 1 outputs y - 0 measured disturbances d +LinModel with a sample time Ts = 0.1 s: +└ dimensions: + ├ 1 manipulated inputs u + ├ 1 states x + ├ 1 outputs y + └ 0 measured disturbances d ``` # Extended Help @@ -200,11 +202,12 @@ See also [`tf`](@extref ControlSystemsBase.tf) # Examples ```jldoctest julia> model = LinModel([tf(3, [30, 1]) tf(-2, [5, 1])], 0.5, i_d=[2]) -LinModel with a sample time Ts = 0.5 s and: - 1 manipulated inputs u - 2 states x - 1 outputs y - 1 measured disturbances d +LinModel with a sample time Ts = 0.5 s: +└ dimensions: + ├ 1 manipulated inputs u + ├ 2 states x + ├ 1 outputs y + └ 1 measured disturbances d ``` """ function LinModel(sys::TransferFunction, Ts::Union{Real,Nothing} = nothing; kwargs...) 
diff --git a/src/model/nonlinmodel.jl b/src/model/nonlinmodel.jl index 1015c0fae..7b73b745e 100644 --- a/src/model/nonlinmodel.jl +++ b/src/model/nonlinmodel.jl @@ -152,22 +152,28 @@ julia> f!(ẋ, x, u, _ , p) = (ẋ .= p*x .+ u; nothing); julia> h!(y, x, _ , _ ) = (y .= 0.1x; nothing); julia> model1 = NonLinModel(f!, h!, 5.0, 1, 1, 1, p=-0.2) # continuous dynamics -NonLinModel with a sample time Ts = 5.0 s, RungeKutta(4) solver and: - 1 manipulated inputs u - 1 states x - 1 outputs y - 0 measured disturbances d +NonLinModel with a sample time Ts = 5.0 s: +├ solver: RungeKutta(4) +├ jacobian: AutoForwardDiff +└ dimensions: + ├ 1 manipulated inputs u + ├ 1 states x + ├ 1 outputs y + └ 0 measured disturbances d julia> f(x, u, _ , _ ) = 0.1x + u; julia> h(x, _ , _ ) = 2x; julia> model2 = NonLinModel(f, h, 2.0, 1, 1, 1, solver=nothing) # discrete dynamics -NonLinModel with a sample time Ts = 2.0 s, empty solver and: - 1 manipulated inputs u - 1 states x - 1 outputs y - 0 measured disturbances d +NonLinModel with a sample time Ts = 2.0 s: +├ solver: EmptySolver +├ jacobian: AutoForwardDiff +└ dimensions: + ├ 1 manipulated inputs u + ├ 1 states x + ├ 1 outputs y + └ 0 measured disturbances d ``` # Extended Help @@ -307,8 +313,11 @@ h!(y0, model::NonLinModel, x0, d0, p) = model.h!(y0, x0, d0, p) include("solver.jl") -function detailstr(model::NonLinModel{<:Real, <:RungeKutta{N}}) where N - return ", $(nameof(typeof(model.solver)))($N) solver" +function print_details(io::IO, model::NonLinModel{<:Real, <:RungeKutta{N}}) where N + println(io, "├ solver: $(nameof(typeof(model.solver)))($N)") + println(io, "├ jacobian: $(backend_str(model.jacobian))") +end +function print_details(io::IO, model::NonLinModel) + println(io, "├ solver: $(nameof(typeof(model.solver)))") + println(io, "├ jacobian: $(backend_str(model.jacobian))") end -detailstr(::NonLinModel{<:Real, <:EmptySolver}) = ", empty solver" -detailstr(::NonLinModel) = "" \ No newline at end of file diff --git 
a/src/predictive_control.jl b/src/predictive_control.jl index 2563d0261..3579d8402 100644 --- a/src/predictive_control.jl +++ b/src/predictive_control.jl @@ -27,20 +27,27 @@ include("controller/linmpc.jl") include("controller/nonlinmpc.jl") function Base.show(io::IO, mpc::PredictiveController) + estim, model = mpc.estim, mpc.estim.model Hp, Hc, nϵ = mpc.Hp, mpc.Hc, mpc.nϵ - nu, nd = mpc.estim.model.nu, mpc.estim.model.nd - nx̂, nym, nyu = mpc.estim.nx̂, mpc.estim.nym, mpc.estim.nyu + nu, nd = model.nu, model.nd + nx̂, nym, nyu = estim.nx̂, estim.nym, estim.nyu n = maximum(ndigits.((Hp, Hc, nu, nx̂, nym, nyu, nd))) + 1 - println(io, "$(nameof(typeof(mpc))) controller with a sample time Ts = "* - "$(mpc.estim.model.Ts) s, $(JuMP.solver_name(mpc.optim)) optimizer, "* - "$(nameof(typeof(mpc.transcription))) transcription, "* - "$(nameof(typeof(mpc.estim))) estimator and:") - println(io, "$(lpad(Hp, n)) prediction steps Hp") - println(io, "$(lpad(Hc, n)) control steps Hc") - println(io, "$(lpad(nϵ, n)) slack variable ϵ (control constraints)") + println(io, "$(nameof(typeof(mpc))) controller with a sample time Ts = $(model.Ts) s:") + println(io, "├ estimator: $(nameof(typeof(mpc.estim)))") + println(io, "├ model: $(nameof(typeof(model)))") + println(io, "├ optimizer: $(JuMP.solver_name(mpc.optim))") + println(io, "├ transcription: $(nameof(typeof(mpc.transcription)))") + print_backends(io, mpc) + println(io, "└ dimensions:") + println(io, " ├$(lpad(Hp, n)) prediction steps Hp") + println(io, " ├$(lpad(Hc, n)) control steps Hc") + println(io, " ├$(lpad(nϵ, n)) slack variable ϵ (control constraints)") print_estim_dim(io, mpc.estim, n) end +"No differentiation backends to print for a `PredictiveController` by default." +print_backends(::IO, ::PredictiveController) = nothing + "Functor allowing callable `PredictiveController` object as an alias for `moveinput!`."
function (mpc::PredictiveController)( ry::Vector = mpc.estim.model.yop, diff --git a/src/sim_model.jl b/src/sim_model.jl index ebb006f2a..a7bfa7eb9 100644 --- a/src/sim_model.jl +++ b/src/sim_model.jl @@ -85,11 +85,12 @@ The state `xop` and the additional `fop` operating points are frequently zero e. # Examples ```jldoctest julia> model = setop!(LinModel(tf(3, [10, 1]), 2.0), uop=[50], yop=[20]) -LinModel with a sample time Ts = 2.0 s and: - 1 manipulated inputs u - 1 states x - 1 outputs y - 0 measured disturbances d +LinModel with a sample time Ts = 2.0 s: +└ dimensions: + ├ 1 manipulated inputs u + ├ 1 states x + ├ 1 outputs y + └ 0 measured disturbances d julia> y = model() 1-element Vector{Float64}: @@ -134,11 +135,12 @@ used in the plotting functions. # Examples ```jldoctest julia> model = setname!(LinModel(tf(3, [10, 1]), 2.0), u=["\$A\$ (%)"], y=["\$T\$ (∘C)"]) -LinModel with a sample time Ts = 2.0 s and: - 1 manipulated inputs u - 1 states x - 1 outputs y - 0 measured disturbances d +LinModel with a sample time Ts = 2.0 s: +└ dimensions: + ├ 1 manipulated inputs u + ├ 1 states x + ├ 1 outputs y + └ 0 measured disturbances d ``` """ function setname!(model::SimModel; u=nothing, y=nothing, d=nothing, x=nothing) @@ -172,8 +174,6 @@ function setstate!(model::SimModel, x) return model end -detailstr(model::SimModel) = "" - @doc raw""" initstate!(model::SimModel, u, d=[]) -> x @@ -372,13 +372,17 @@ function Base.show(io::IO, model::SimModel) nu, nd = model.nu, model.nd nx, ny = model.nx, model.ny n = maximum(ndigits.((nu, nx, ny, nd))) + 1 - println(io, "$(nameof(typeof(model))) with a sample time Ts = $(model.Ts) s"* - "$(detailstr(model)) and:") - println(io, "$(lpad(nu, n)) manipulated inputs u") - println(io, "$(lpad(nx, n)) states x") - println(io, "$(lpad(ny, n)) outputs y") - print(io, "$(lpad(nd, n)) measured disturbances d") + println(io, "$(nameof(typeof(model))) with a sample time Ts = $(model.Ts) s:") + print_details(io, model) + println(io, "└ 
dimensions:") + println(io, " ├$(lpad(nu, n)) manipulated inputs u") + println(io, " ├$(lpad(nx, n)) states x") + println(io, " ├$(lpad(ny, n)) outputs y") + print(io, " └$(lpad(nd, n)) measured disturbances d") end +"Print additional details of `model` if any (no details by default)." +print_details(::IO, ::SimModel) = nothing + "Functor allowing callable `SimModel` object as an alias for `evaloutput`." (model::SimModel)(d=model.buffer.empty) = evaloutput(model::SimModel, d) \ No newline at end of file diff --git a/src/state_estim.jl b/src/state_estim.jl index 691393d42..778c96406 100644 --- a/src/state_estim.jl +++ b/src/state_estim.jl @@ -29,21 +29,28 @@ include("estimator/internal_model.jl") include("estimator/manual.jl") function Base.show(io::IO, estim::StateEstimator) - nu, nd = estim.model.nu, estim.model.nd + model = estim.model + nu, nd = model.nu, model.nd nx̂, nym, nyu = estim.nx̂, estim.nym, estim.nyu n = maximum(ndigits.((nu, nx̂, nym, nyu, nd))) + 1 - println(io, "$(nameof(typeof(estim))) estimator with a sample time "* - "Ts = $(estim.model.Ts) s, $(nameof(typeof(estim.model))) and:") + println(io, "$(nameof(typeof(estim))) estimator with a sample time Ts = $(model.Ts) s:") + println(io, "├ model: $(nameof(typeof(estim.model)))") + print_details(io, estim) + println(io, "└ dimensions:") print_estim_dim(io, estim, n) end +"Print additional details of `estim` if any (no details by default)." +print_details(::IO, ::StateEstimator) = nothing + "Print the overall dimensions of the state estimator `estim` with left padding `n`." 
function print_estim_dim(io::IO, estim::StateEstimator, n) nu, nd = estim.model.nu, estim.model.nd nx̂, nym, nyu = estim.nx̂, estim.nym, estim.nyu - println(io, "$(lpad(nu, n)) manipulated inputs u ($(sum(estim.nint_u)) integrating states)") - println(io, "$(lpad(nx̂, n)) estimated states x̂") - println(io, "$(lpad(nym, n)) measured outputs ym ($(sum(estim.nint_ym)) integrating states)") - println(io, "$(lpad(nyu, n)) unmeasured outputs yu") - print(io, "$(lpad(nd, n)) measured disturbances d") + niu, niym = sum(estim.nint_u), sum(estim.nint_ym) + println(io, " ├$(lpad(nu, n)) manipulated inputs u ($niu integrating states)") + println(io, " ├$(lpad(nx̂, n)) estimated states x̂") + println(io, " ├$(lpad(nym, n)) measured outputs ym ($niym integrating states)") + println(io, " ├$(lpad(nyu, n)) unmeasured outputs yu") + print(io, " └$(lpad(nd, n)) measured disturbances d") end \ No newline at end of file