Optimization

2024-09-23

Optimization

Optimization

\[ \max_{x} f(x) \]

Overview of Algorithms

Example Problem

Code
using Statistics, LinearAlgebra

"""
    cueiv(β; n=1000, σ=0.1, γ=[I; ones(2,length(β))], ρ=0.5)

Simulate a linear instrumental-variables model and build the continuously
updated GMM (CUE) objective for it.

Data generating process: instruments `z ~ N(0,I)` (one column per row of `γ`),
regressors `x = z*γ + endo` with `endo ~ N(0,I)`, and outcome `y = x*β + ϵ`
where `ϵ` is correlated with the first endogenous component through `ρ`
and scaled by `σ`.

# Arguments
- `β`: true coefficient vector (its length sets the number of regressors).
- `n`: sample size.
- `σ`: standard deviation scale of the error term.
- `γ`: instrument-to-regressor loading matrix; default stacks `I` over a
  `2 × length(β)` block of ones, giving an over-identified model.
- `ρ`: correlation between the error and the first endogenous shock.

# Returns
`(cueobj, x, y, z)` where `cueobj(b)` is the scalar CUE-GMM objective
`n * Ē[g(b)]' * cov(g(b))⁻¹ * Ē[g(b)]` and `x`, `y`, `z` are the simulated data.
"""
function cueiv(β; n=1000, σ=0.1, γ=[I; ones(2,length(β))], ρ=0.5)
  z = randn(n, size(γ, 1))
  endo = randn(n, length(β))
  x = z*γ .+ endo
  ϵ = σ*(randn(n)*sqrt(1.0 - ρ^2) .+ endo[:,1]*ρ)
  y = x*β .+ ϵ
  # moment conditions: instrument-weighted residuals, one row per observation
  g(b) = (y - x*b) .* z
  function cueobj(b)
    G = g(b)
    Eg = mean(G, dims=1)
    # Solve cov(G) \ Eg' rather than forming inv(cov(G)) explicitly:
    # numerically more stable and avoids the explicit O(k^3) inverse.
    (n * Eg * (cov(G) \ Eg'))[1]
  end
  return cueobj, x, y, z
end
cueiv (generic function with 1 method)
Code
using Plots
# Visualize the CUE objective surface for a 2-parameter model
# with strong endogeneity (ρ = 0.9).
β = ones(2)
f, x, y, z = cueiv(β; σ=0.1, ρ=0.9)
b1 = range(0, 2, length=100)
b2 = range(0, 2, length=100)
# evaluate f on the b1 × b2 grid via broadcasting (b2' makes it 100×100)
fval = ((a, b) -> f([a, b])).(b1, b2')
Plots.plotly()
contourf(b1, b2, fval)
┌ Warning: Failed to load integration with PlotlyBase & PlotlyKaleido.
│   exception =
│    ArgumentError: Package PlotlyKaleido not found in current path.
│    - Run `import Pkg; Pkg.add("PlotlyKaleido")` to install the PlotlyKaleido package.
│    Stacktrace:
│      [1] macro expansion
│        @ ./loading.jl:1772 [inlined]
│      [2] macro expansion
│        @ ./lock.jl:267 [inlined]
│      [3] __require(into::Module, mod::Symbol)
│        @ Base ./loading.jl:1753
│      [4] #invoke_in_world#3
│        @ ./essentials.jl:926 [inlined]
│      [5] invoke_in_world
│        @ ./essentials.jl:923 [inlined]
│      [6] require(into::Module, mod::Symbol)
│        @ Base ./loading.jl:1746
│      [7] top-level scope
│        @ ~/.julia/packages/Plots/kLeqV/src/backends.jl:569
│      [8] eval
│        @ ./boot.jl:385 [inlined]
│      [9] _initialize_backend(pkg::Plots.PlotlyBackend)
│        @ Plots ~/.julia/packages/Plots/kLeqV/src/backends.jl:567
│     [10] backend
│        @ ~/.julia/packages/Plots/kLeqV/src/backends.jl:245 [inlined]
│     [11] plotly()
│        @ Plots ~/.julia/packages/Plots/kLeqV/src/backends.jl:86
│     [12] top-level scope
│        @ ~/compecon/ECON622/qmd/optimization.qmd:69
│     [13] eval
│        @ ./boot.jl:385 [inlined]
│     [14] (::QuartoNotebookWorker.var"#17#18"{Module, Expr})()
│        @ QuartoNotebookWorker ~/.julia/packages/QuartoNotebookRunner/VUFgu/src/QuartoNotebookWorker/src/render.jl:219
│     [15] (::QuartoNotebookWorker.Packages.IOCapture.var"#5#9"{DataType, QuartoNotebookWorker.var"#17#18"{Module, Expr}, IOContext{Base.PipeEndpoint}, IOContext{Base.PipeEndpoint}, IOContext{IOStream}, IOContext{Base.PipeEndpoint}})()
│        @ QuartoNotebookWorker.Packages.IOCapture ~/.julia/packages/IOCapture/Y5rEA/src/IOCapture.jl:170
│     [16] with_logstate(f::Function, logstate::Any)
│        @ Base.CoreLogging ./logging.jl:515
│     [17] with_logger
│        @ ./logging.jl:627 [inlined]
│     [18] capture(f::QuartoNotebookWorker.var"#17#18"{Module, Expr}; rethrow::Type, color::Bool, passthrough::Bool, capture_buffer::IOBuffer, io_context::Vector{Pair{Symbol, Any}})
│        @ QuartoNotebookWorker.Packages.IOCapture ~/.julia/packages/IOCapture/Y5rEA/src/IOCapture.jl:167
│     [19] include_str(mod::Module, code::String; file::String, line::Int64, cell_options::Dict{String, Any})
│        @ QuartoNotebookWorker ~/.julia/packages/QuartoNotebookRunner/VUFgu/src/QuartoNotebookWorker/src/render.jl:199
│     [20] #invokelatest#2
│        @ ./essentials.jl:894 [inlined]
│     [21] invokelatest
│        @ ./essentials.jl:889 [inlined]
│     [22] #6
│        @ ~/.julia/packages/QuartoNotebookRunner/VUFgu/src/QuartoNotebookWorker/src/render.jl:18 [inlined]
│     [23] with_inline_display(f::QuartoNotebookWorker.var"#6#7"{String, String, Int64, Dict{String, Any}}, cell_options::Dict{String, Any})
│        @ QuartoNotebookWorker ~/.julia/packages/QuartoNotebookRunner/VUFgu/src/QuartoNotebookWorker/src/InlineDisplay.jl:26
│     [24] _render_thunk(thunk::Function, code::String, cell_options::Dict{String, Any}, is_expansion_ref::Base.RefValue{Bool}; inline::Bool)
│        @ QuartoNotebookWorker ~/.julia/packages/QuartoNotebookRunner/VUFgu/src/QuartoNotebookWorker/src/render.jl:42
│     [25] _render_thunk
│        @ ~/.julia/packages/QuartoNotebookRunner/VUFgu/src/QuartoNotebookWorker/src/render.jl:35 [inlined]
│     [26] #render#5
│        @ ~/.julia/packages/QuartoNotebookRunner/VUFgu/src/QuartoNotebookWorker/src/render.jl:15 [inlined]
│     [27] render(code::String, file::String, line::Int64, cell_options::Dict{String, Any})
│        @ QuartoNotebookWorker ~/.julia/packages/QuartoNotebookRunner/VUFgu/src/QuartoNotebookWorker/src/render.jl:1
│     [28] render(::String, ::Vararg{Any}; kwargs::@Kwargs{})
│        @ Main ~/.julia/packages/QuartoNotebookRunner/VUFgu/src/worker.jl:14
│     [29] top-level scope
│        @ none:1
│     [30] eval
│        @ ./boot.jl:385 [inlined]
│     [31] (::Main.var"#1#2"{Sockets.TCPSocket, UInt64, Bool, @Kwargs{}, Tuple{Module, Expr}, typeof(Core.eval)})()
│        @ Main ~/.julia/packages/Malt/Z3YQq/src/worker.jl:120
└ @ Plots ~/.julia/packages/Plots/kLeqV/src/backends.jl:577

Packages

Optim.jl

Code
import Optim
# Compare Optim.jl algorithms on the CUE objective, first with 2 parameters
# then with 20 to see how each method scales.
β = ones(2)
f = cueiv(β; σ=0.1, ρ=0.5)[1]
β0 = zero(β)

# derivative-free simplex method
sol = Optim.optimize(f, β0, Optim.NelderMead())
# quasi-Newton with forward-mode AD gradients
sol = Optim.optimize(f, β0, Optim.LBFGS(), autodiff=:forward)
# larger memory and a backtracking line search
sol = Optim.optimize(f, β0, Optim.LBFGS(m=20, linesearch=Optim.LineSearches.BackTracking()), autodiff=:forward)
# second-order trust-region method
sol = Optim.optimize(f, β0, Optim.NewtonTrustRegion(), autodiff=:forward)

β = ones(20)
f = cueiv(β)[1]
β0 = zero(β)
sol = Optim.optimize(f, β0, Optim.NelderMead())
sol = Optim.optimize(f, β0, Optim.LBFGS(), autodiff=:forward)
sol = Optim.optimize(f, β0, Optim.LBFGS(m=20, linesearch=Optim.LineSearches.BackTracking()), autodiff=:forward)
sol = Optim.optimize(f, β0, Optim.NewtonTrustRegion(), autodiff=:forward)
 * Status: success

 * Candidate solution
    Final objective value:     1.925800e+02

 * Found with
    Algorithm:     Newton's Method (Trust Region)

 * Convergence measures
    |x - x'|               = 4.93e+01 ≰ 0.0e+00
    |x - x'|/|x'|          = 1.08e-02 ≰ 0.0e+00
    |f(x) - f(x')|         = 2.04e-06 ≰ 0.0e+00
    |f(x) - f(x')|/|f(x')| = 1.06e-08 ≰ 0.0e+00
    |g(x)|                 = 9.94e-09 ≤ 1.0e-08

 * Work counters
    Seconds run:   28  (vs limit Inf)
    Iterations:    115
    f(x) calls:    116
    ∇f(x) calls:   116
    ∇²f(x) calls:  113

Optimization.jl

  • Optimization docs
  • Unified interface to many packages (like NonlinearSolve.jl, but less polished)

JuliaSmoothOptimizers

using ADNLPModels
import DCISolver, JSOSolvers, Percival
# JuliaSmoothOptimizers: unconstrained, equality- and inequality-constrained
# versions of the CUE problem via ADNLPModels.
β = ones(2)
f = cueiv(β; γ = I + zeros(2,2))[1]
β0 = zero(β)

nlp = ADNLPModel(f, β0)
stats = JSOSolvers.lbfgs(nlp)
print(stats)

stats = JSOSolvers.tron(nlp)
print(stats)

# equality constraint: length(β0) - sum(b) == 0
ecnlp = ADNLPModel(f, β0, b -> [length(β0) - sum(b)], zeros(1), zeros(1))
stats = DCISolver.dci(ecnlp)
print(stats)

# inequality constraint: -1 <= length(β0) - sum(b) <= 1
icnlp = ADNLPModel(f, β0, b -> [length(β0) - sum(b)], -ones(1), ones(1))
stats = Percival.percival(icnlp)
print(stats)

JuMP

  • JuMP is a modelling language for optimization in Julia
  • many solvers
  • best when write problem in special JuMP syntax
    • efficiently calculate derivatives and recognize special problem structure such as linearity, sparsity, etc.
Code
using JuMP
import MadNLP, Ipopt
k = 10
β = ones(k)
f, x, y, z = cueiv(β; γ = I + zeros(k,k))
β0 = zero(β)
n, k = size(x)

# sub-optimal usage of JuMP: the objective is registered as an opaque
# scalar operator, so JuMP cannot exploit its structure or sparsity
m = Model(() -> MadNLP.Optimizer(print_level=MadNLP.INFO, max_iter=100))
@variable(m, β[1:k])
@operator(m, op_f, k, (x...) -> f(vcat(x...))) # hides internals of f from JuMP
@objective(m, Min, op_f(β...))
optimize!(m)
JuMP.value.(β)
┌ Warning: Replacing docs for `SolverCore.solve! :: Union{}` in module `MadNLP`
└ @ Base.Docs docs/Docs.jl:243
This is MadNLP version v0.8.4, running with umfpack

Number of nonzeros in constraint Jacobian............:        0
Number of nonzeros in Lagrangian Hessian.............:        0

Total number of variables............................:       10
                     variables with only lower bounds:        0
                variables with lower and upper bounds:        0
                     variables with only upper bounds:        0
Total number of equality constraints.................:        0
Total number of inequality constraints...............:        0
        inequality constraints with only lower bounds:        0
   inequality constraints with lower and upper bounds:        0
        inequality constraints with only upper bounds:        0

iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
   0  3.4959747e+02 0.00e+00 3.18e+01  -1.0 0.00e+00    -  0.00e+00 0.00e+00   0
   1  3.4275708e+02 0.00e+00 2.30e+01  -1.0 9.55e+01    -  1.00e+00 3.12e-02f  6
   2  3.0505724e+02 0.00e+00 1.46e+01  -1.0 9.01e-01    -  1.00e+00 1.00e+00f  1
   3  2.7079028e+02 0.00e+00 1.28e+01  -1.0 2.76e+00    -  1.00e+00 1.00e+00f  1
   4  2.3816600e+02 0.00e+00 7.48e+00  -1.0 1.75e+00    -  1.00e+00 1.00e+00f  1
   5  2.1025967e+02 0.00e+00 6.09e+00  -1.0 4.38e+00    -  1.00e+00 1.00e+00f  1
   6  2.0408235e+02 0.00e+00 1.53e+00  -1.0 2.87e+00    -  1.00e+00 5.00e-01f  2
   7  2.0282469e+02 0.00e+00 9.75e-01  -1.0 5.62e-01    -  1.00e+00 1.00e+00f  1
   8  2.0218956e+02 0.00e+00 7.06e-01  -1.7 4.74e-01    -  1.00e+00 1.00e+00h  1
   9  2.0183708e+02 0.00e+00 6.60e-01  -1.7 8.32e-01    -  1.00e+00 1.00e+00h  1
iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
  10  2.0165613e+02 0.00e+00 2.50e-01  -1.7 2.94e-01    -  1.00e+00 1.00e+00h  1
  11  2.0163690e+02 0.00e+00 3.78e-02  -1.7 8.30e-02    -  1.00e+00 1.00e+00h  1
  12  2.0163549e+02 0.00e+00 1.66e-02  -2.5 3.03e-02    -  1.00e+00 1.00e+00h  1
  13  2.0163497e+02 0.00e+00 1.74e-02  -3.8 2.80e-02    -  1.00e+00 1.00e+00h  1
  14  2.0163465e+02 0.00e+00 2.12e-02  -3.8 6.11e-02    -  1.00e+00 5.00e-01h  2
  15  2.0163363e+02 0.00e+00 3.13e-02  -3.8 6.35e-02    -  1.00e+00 1.00e+00h  1
  16  2.0162529e+02 0.00e+00 1.38e-01  -3.8 2.92e+00    -  1.00e+00 2.50e-01h  3
  17  2.0161175e+02 0.00e+00 3.45e-01  -3.8 8.80e+01    -  1.00e+00 1.56e-02f  7
  18  2.0160805e+02 0.00e+00 2.65e-01  -3.8 1.05e+02    -  1.00e+00 3.91e-03f  9
  19  2.0160487e+02 0.00e+00 2.96e-01  -3.8 1.81e+01    -  1.00e+00 1.56e-02h  7
iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
  20  2.0154197e+02 0.00e+00 1.57e+00  -3.8 1.25e+02    -  1.00e+00 3.12e-02f  6
  21  2.0154053e+02 0.00e+00 1.58e+00  -3.8 3.34e+02    -  1.00e+00 4.88e-04f  12
  22  2.0153986e+02 0.00e+00 1.98e+00  -3.8 6.29e+01    -  1.00e+00 7.81e-03f  8
  23  2.0153471e+02 0.00e+00 2.84e+00  -3.8 1.57e+02    -  1.00e+00 1.95e-03f  10
  24  2.0153063e+02 0.00e+00 3.14e+00  -3.8 1.11e+02    -  1.00e+00 1.95e-03f  10
  25  2.0152695e+02 0.00e+00 3.37e+00  -3.8 2.89e+01    -  1.00e+00 3.91e-03f  9
  26  2.0152681e+02 0.00e+00 7.14e+00  -3.8 1.37e+01    -  1.00e+00 6.25e-02f  5
  27  2.0150544e+02 0.00e+00 8.95e+00  -3.8 5.10e+02    -  1.00e+00 4.88e-04f  12
  28  2.0149840e+02 0.00e+00 1.11e+01  -3.8 8.14e+01    -  1.00e+00 1.95e-03f  10
  29  2.0148435e+02 0.00e+00 1.43e+01  -3.8 4.83e+01    -  1.00e+00 3.91e-03f  9
iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
  30  2.0146691e+02 0.00e+00 1.77e+01  -3.8 5.70e+01    -  1.00e+00 1.95e-03f  10
  31  2.0146042e+02 0.00e+00 2.65e+01  -3.8 4.39e+01    -  1.00e+00 3.91e-03f  9
  32  2.0145505e+02 0.00e+00 2.79e+01  -3.8 1.61e+01    -  1.00e+00 3.91e-03f  9
  33  2.0142697e+02 0.00e+00 3.27e+01  -3.8 4.23e+01    -  1.00e+00 1.95e-03f  10
  34  2.0134141e+02 0.00e+00 4.76e+01  -3.8 1.28e+02    -  1.00e+00 9.77e-04f  11
  35  2.0131932e+02 0.00e+00 7.14e+01  -3.8 1.23e+02    -  1.00e+00 9.77e-04f  11
  36  2.0129103e+02 0.00e+00 9.40e+01  -3.8 3.12e+01    -  1.00e+00 1.95e-03f  10
  37  2.0127522e+02 0.00e+00 1.50e+02  -3.8 2.53e+01    -  1.00e+00 3.91e-03f  9
  38  2.0123251e+02 0.00e+00 1.72e+02  -3.8 3.04e+01    -  1.00e+00 9.77e-04f  11
  39  2.0002288e+02 0.00e+00 3.75e+02  -3.8 2.47e+01    -  1.00e+00 3.91e-03f  9
iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
  40  1.9736971e+02 0.00e+00 6.32e+02  -3.8 4.02e+02    -  1.00e+00 1.22e-04f  14
  41  1.9716351e+02 0.00e+00 7.10e+02  -3.8 1.46e+01    -  1.00e+00 4.88e-04f  12
  42  1.9689903e+02 0.00e+00 7.34e+02  -3.8 8.41e-02    -  1.00e+00 3.12e-02f  6
  43  1.9451806e+02 0.00e+00 6.09e+02  -3.8 3.68e-02    -  1.00e+00 5.00e-01f  2
  44  1.9169873e+02 0.00e+00 7.76e+02  -3.8 2.93e-02    -  1.00e+00 1.00e+00f  1
  45  1.7128022e+02 0.00e+00 1.21e+03  -3.8 7.14e-02    -  1.00e+00 5.00e-01f  2
  46  1.6238472e+02 0.00e+00 1.51e+03  -3.8 5.23e-02    -  1.00e+00 1.00e+00f  1
  47  1.4215950e+02 0.00e+00 7.00e+02  -3.8 3.85e-02    -  1.00e+00 1.00e+00f  1
  48  8.6258499e+01 0.00e+00 2.69e+03  -3.8 4.42e-02    -  1.00e+00 1.00e+00f  1
  49  5.6918124e+01 0.00e+00 2.54e+03  -3.8 5.08e-02    -  1.00e+00 2.50e-01f  3
iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
  50  2.8932737e+01 0.00e+00 1.58e+03  -3.8 4.72e-02    -  1.00e+00 5.00e-01f  2
  51  2.9319167e+00 0.00e+00 6.21e+02  -3.8 8.41e-03    -  1.00e+00 1.00e+00f  1
  52  6.2044148e-01 0.00e+00 2.61e+02  -3.8 5.42e-03    -  1.00e+00 5.00e-01f  2
  53  7.8362428e-02 0.00e+00 1.11e+02  -3.8 1.80e-03    -  1.00e+00 1.00e+00h  1
  54  1.6383381e-02 0.00e+00 8.09e+01  -3.8 6.76e-04    -  1.00e+00 1.00e+00h  1
  55  1.4911326e-03 0.00e+00 1.51e+01  -3.8 3.93e-04    -  1.00e+00 1.00e+00h  1
  56  4.5174273e-05 0.00e+00 2.35e+00  -3.8 6.10e-05    -  1.00e+00 1.00e+00h  1
  57  3.0187950e-06 0.00e+00 6.10e-01  -3.8 1.32e-05    -  1.00e+00 1.00e+00h  1
  58  7.2713672e-08 0.00e+00 1.28e-01  -3.8 2.86e-06    -  1.00e+00 1.00e+00h  1
  59  1.4860218e-09 0.00e+00 1.86e-02  -3.8 7.46e-07    -  1.00e+00 1.00e+00h  1
iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
  60  3.1452161e-10 0.00e+00 6.85e-03  -3.8 1.15e-07    -  1.00e+00 1.00e+00h  1
  61  2.5026853e-12 0.00e+00 7.91e-04  -3.8 2.61e-08    -  1.00e+00 1.00e+00h  1
  62  9.9820837e-14 0.00e+00 1.29e-04  -5.7 2.89e-09    -  1.00e+00 1.00e+00h  1
  63  7.0509290e-15 0.00e+00 3.11e-05  -5.7 5.54e-10    -  1.00e+00 1.00e+00h  1
  64  7.7629621e-17 0.00e+00 4.03e-06  -5.7 2.05e-10    -  1.00e+00 1.00e+00h  1
  65  1.3438193e-17 0.00e+00 1.82e-06  -8.6 2.27e-11    -  1.00e+00 1.00e+00h  1
  66  9.6249111e-21 0.00e+00 3.70e-08  -8.6 7.33e-12    -  1.00e+00 1.00e+00h  1
  67  5.1326728e-22 0.00e+00 5.44e-09  -8.6 1.59e-13    -  1.00e+00 1.00e+00h  1

Number of Iterations....: 67

                                   (scaled)                 (unscaled)
Objective...............:   5.1326727756299644e-22    5.1326727756299644e-22
Dual infeasibility......:   5.4439742191250211e-09    5.4439742191250211e-09
Constraint violation....:   0.0000000000000000e+00    0.0000000000000000e+00
Complementarity.........:   0.0000000000000000e+00    0.0000000000000000e+00
Overall NLP error.......:   5.4439742191250211e-09    5.4439742191250211e-09

Number of objective function evaluations             = 442
Number of objective gradient evaluations             = 68
Number of constraint evaluations                     = 442
Number of constraint Jacobian evaluations            = 1
Number of Lagrangian Hessian evaluations             = 0
Total wall-clock secs in solver (w/o fun. eval./lin. alg.)  = 19.761
Total wall-clock secs in linear solver                      =  0.478
Total wall-clock secs in NLP function evaluations           =  0.255
Total wall-clock secs                                       = 20.494

EXIT: Optimal Solution Found (tol = 1.0e-08).
10-element Vector{Float64}:
 1.0019790066553154
 0.9972041660398521
 0.9983436538794038
 1.0038204311153165
 0.9945321338917558
 0.998154052014748
 1.0039342503480513
 1.0039944742522768
 0.995661392828938
 0.9993280224181564
Code
# better usage of JuMP
m2 = Model(()->MadNLP.Optimizer(print_level=MadNLP.INFO, max_iter=100))
#m2 = Model(Ipopt.Optimizer)
#set_attribute(m2, "print_level", 5)
n, k = size(x)
@variable(m2, β2[1:k])
# moment conditions built directly from JuMP variables, so the model sees
# polynomial expressions instead of an opaque black-box objective
g = (y - x*β2).*z
Eg=mean(g,dims=1)
invW = cov(g)
# auxiliary variables Wg stand in for cov(g)^{-1} * Eg'; the linear system
# below defines them implicitly, avoiding an explicit matrix inverse
@variable(m2, Wg[1:k])
@constraint(m2, invW*Wg .== Eg')
# CUE objective n * Eg * cov(g)^{-1} * Eg' expressed via Wg
@objective(m2, Min, n*dot(Eg,Wg))
optimize!(m2)
JuMP.value.(β2)
This is MadNLP version v0.8.4, running with umfpack

Number of nonzeros in constraint Jacobian............:      200
Number of nonzeros in Lagrangian Hessian.............:     1750

Total number of variables............................:       20
                     variables with only lower bounds:        0
                variables with lower and upper bounds:        0
                     variables with only upper bounds:        0
Total number of equality constraints.................:       10
Total number of inequality constraints...............:        0
        inequality constraints with only lower bounds:        0
   inequality constraints with lower and upper bounds:        0
        inequality constraints with only upper bounds:        0

iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
   0  0.0000000e+00 1.30e+00 4.04e+00  -1.0 0.00e+00    -  0.00e+00 0.00e+00   0
   1  3.6578937e+02 7.43e-01 1.56e+01  -1.0 6.25e-01    -  1.00e+00 1.00e+00h  1
   2  2.9062703e+02 5.18e-03 2.39e+00  -1.0 2.42e-02   2.0 1.00e+00 1.00e+00h  1
   3 -8.8949112e+00 3.10e+00 2.26e+02  -1.0 1.68e+00    -  1.00e+00 1.00e+00h  1
   4  1.2385249e+00 1.04e+00 7.97e+01  -1.0 3.94e+00    -  1.00e+00 1.00e+00h  1
   5 -3.2203872e-02 8.29e-03 6.32e-01  -1.0 9.60e-01    -  1.00e+00 1.00e+00h  1
   6  6.2813486e-08 5.95e-06 4.69e-04  -1.7 9.45e-03    -  1.00e+00 1.00e+00h  1
   7  2.9877420e-14 3.09e-12 7.05e-06  -5.7 6.90e-04    -  1.00e+00 1.00e+00h  1
   8  4.8467614e-27 3.29e-16 4.10e-14  -8.6 4.77e-08    -  1.00e+00 1.00e+00h  1

Number of Iterations....: 8

                                   (scaled)                 (unscaled)
Objective...............:   3.7198411363219821e-28    4.8467614016778965e-27
Dual infeasibility......:   4.0998006929452738e-14    5.3418291332695970e-13
Constraint violation....:   3.2864463628030602e-16    3.2864463628030602e-16
Complementarity.........:   0.0000000000000000e+00    0.0000000000000000e+00
Overall NLP error.......:   3.1465562268156216e-15    4.0998006929452738e-14

Number of objective function evaluations             = 9
Number of objective gradient evaluations             = 9
Number of constraint evaluations                     = 9
Number of constraint Jacobian evaluations            = 9
Number of Lagrangian Hessian evaluations             = 8
Total wall-clock secs in solver (w/o fun. eval./lin. alg.)  =  3.851
Total wall-clock secs in linear solver                      =  0.001
Total wall-clock secs in NLP function evaluations           =  0.452
Total wall-clock secs                                       =  4.304

EXIT: Optimal Solution Found (tol = 1.0e-08).
10-element Vector{Float64}:
 1.0019790066553267
 0.9972041660398772
 0.9983436538793767
 1.0038204311153398
 0.9945321338917862
 0.9981540520147151
 1.0039342503480337
 1.0039944742522884
 0.9956613928289493
 0.9993280224181971
Code
# or Empirical Likelihood
m3 = Model(()->MadNLP.Optimizer(print_level=MadNLP.INFO, max_iter=100))
@variable(m3, β3[1:k])
# per-observation probabilities, bounded away from 0 and 1 so log(p) stays finite
@variable(m3, 1e-8 <= p[1:n] <= 1-1e-8)
@constraint(m3, sum(p) == 1)
# maximize the average log empirical likelihood
@objective(m3, Max, 1/n*sum(log.(p)))
JuMP.set_start_value.(p, 1/n)
g3 = (y - x*β3).*z
# probability-weighted moment conditions must hold exactly
@constraint(m3, p'*g3 .== 0)
optimize!(m3)
JuMP.value.(β3)
This is MadNLP version v0.8.4, running with umfpack

Number of nonzeros in constraint Jacobian............:   211000
Number of nonzeros in Lagrangian Hessian.............:   101000

Total number of variables............................:     1010
                     variables with only lower bounds:        0
                variables with lower and upper bounds:     1000
                     variables with only upper bounds:        0
Total number of equality constraints.................:       11
Total number of inequality constraints...............:        0
        inequality constraints with only lower bounds:        0
   inequality constraints with lower and upper bounds:        0
        inequality constraints with only upper bounds:        0

iter    objective    inf_pr   inf_du lg(mu)  ||d||  lg(rg) alpha_du alpha_pr  ls
   0  4.6051702e+00 1.30e+01 7.73e-15  -1.0 0.00e+00    -  0.00e+00 0.00e+00   0
   1  6.9077553e+00 1.17e+00 8.10e-01  -1.0 1.00e-01    -  1.00e+00 1.00e+00h  1
   2  6.9077553e+00 1.87e-12 4.26e-14  -1.0 9.04e-01    -  1.00e+00 1.00e+00h  1
   3  6.9077553e+00 1.39e-14 3.51e-14  -2.5 1.45e-12    -  1.00e+00 1.00e+00h  1
   4  6.9077553e+00 3.84e-15 1.11e-15  -5.7 1.34e-14    -  1.00e+00 1.00e+00h  1
   5  6.9077553e+00 3.58e-15 2.22e-16  -9.0 3.79e-15    -  1.00e+00 1.00e+00h  1

Number of Iterations....: 5

                                   (scaled)                 (unscaled)
Objective...............:   6.9077552789822088e+00    6.9077552789822088e+00
Dual infeasibility......:   2.2204460492503131e-16    2.2204460492503131e-16
Constraint violation....:   3.5752650839881994e-15    3.5752650839881994e-15
Complementarity.........:   9.0909090940771483e-10    9.0909090940771483e-10
Overall NLP error.......:   9.0909090940771483e-10    9.0909090940771483e-10

Number of objective function evaluations             = 6
Number of objective gradient evaluations             = 6
Number of constraint evaluations                     = 6
Number of constraint Jacobian evaluations            = 6
Number of Lagrangian Hessian evaluations             = 5
Total wall-clock secs in solver (w/o fun. eval./lin. alg.)  =  0.022
Total wall-clock secs in linear solver                      =  0.011
Total wall-clock secs in NLP function evaluations           =  0.005
Total wall-clock secs                                       =  0.038

EXIT: Optimal Solution Found (tol = 1.0e-08).
10-element Vector{Float64}:
 1.001979006655325
 0.9972041660398774
 0.9983436538793784
 1.0038204311153387
 0.9945321338917851
 0.9981540520147166
 1.0039342503480353
 1.0039944742522933
 0.9956613928289548
 0.9993280224181954

Ipopt

An open source solver that works well on large constrained nonlinear problems. Can be used directly or through JuMP, Optimization.jl or JuliaSmoothOptimizers interfaces.

MadNLP

Interior point solver written in Julia.

Knitro

A commercial solver with state of the art performance on many problems. Maybe worth the cost for large constrained nonlinear problems. Can be used directly or through JuMP, Optimization.jl or JuliaSmoothOptimizers interfaces.

Optimisers

Optimisers.jl has a collection of gradient descent variations designed for use in machine learning. A reasonable choice for problems with a very large number of variables.

Covariance Matrix Adaption Evolution Strategy

import CMAEvolutionStrategy, GCMAES
# Covariance matrix adaptation evolution strategy: gradient-free global
# optimization of the CUE objective, then GCMAES with and without gradients.
β = ones(10)
f = cueiv(β; γ = I + zeros(length(β), length(β)))[1]
β0 = zero(β)

out = CMAEvolutionStrategy.minimize(f, β0, 1.0;
                                    lower = nothing,
                                    upper = nothing,
                                    popsize = 4 + floor(Int, 3*log(length(β0))),
                                    verbosity = 1,
                                    seed = rand(UInt),
                                    maxtime = nothing,
                                    maxiter = 500,
                                    maxfevals = nothing,
                                    ftarget = nothing,
                                    xtol = nothing,
                                    ftol = 1e-11)
@show CMAEvolutionStrategy.xbest(out)

# GCMAES requires box bounds; use a wide box around the origin
xmin, fmin, status = GCMAES.minimize(f, β0, 1.0, fill(-10., length(β0)), fill(10., length(β0)), maxiter=500)
@show xmin

import ForwardDiff
∇f(β) = ForwardDiff.gradient(f, β)
# supplying the gradient lets GCMAES combine CMA-ES with gradient steps
xmin, fmin, status = GCMAES.minimize((f, ∇f), β0, 1.0, fill(-10., length(β0)), fill(10., length(β0)), maxiter=500)
@show xmin