Documentation ¶
Index ¶
- Constants
- func AlgorithmName(algorithm int) string
- func Srand(seed uint64)
- func SrandTime()
- func Version() string
- type Func
- type Mfunc
- type NLopt
- func (n *NLopt) AddEqualityConstraint(h Func, tol float64) error
- func (n *NLopt) AddEqualityMConstraint(h Mfunc, tol []float64) error
- func (n *NLopt) AddInequalityConstraint(fc Func, tol float64) error
- func (n *NLopt) AddInequalityMConstraint(fc Mfunc, tol []float64) error
- func (n *NLopt) Copy() *NLopt
- func (n *NLopt) Destroy()
- func (n *NLopt) ForceStop() error
- func (n *NLopt) GetAlgorithm() int
- func (n *NLopt) GetAlgorithmName() string
- func (n *NLopt) GetDimension() uint
- func (n *NLopt) GetForceStop() int
- func (n *NLopt) GetFtolAbs() float64
- func (n *NLopt) GetFtolRel() float64
- func (n *NLopt) GetInitialStep() ([]float64, []float64, error)
- func (n *NLopt) GetLowerBounds() ([]float64, error)
- func (n *NLopt) GetMaxEval() int
- func (n *NLopt) GetMaxTime() float64
- func (n *NLopt) GetParam(name string, defaultVal float64) float64
- func (n *NLopt) GetPopulation() uint
- func (n *NLopt) GetStopVal() float64
- func (n *NLopt) GetUpperBounds() ([]float64, error)
- func (n *NLopt) GetVectorStorage() uint
- func (n *NLopt) GetXtolAbs() ([]float64, error)
- func (n *NLopt) GetXtolRel() float64
- func (n *NLopt) HasParam(name string) int
- func (n *NLopt) LastStatus() string
- func (n *NLopt) NthParam(idx uint) string
- func (n *NLopt) NumParams() uint
- func (n *NLopt) Optimize(x []float64) ([]float64, float64, error)
- func (n *NLopt) RemoveEqualityConstraints() error
- func (n *NLopt) RemoveInequalityConstraints() error
- func (n *NLopt) SetDefaultInitialStep(x []float64) error
- func (n *NLopt) SetForceStop(val int) error
- func (n *NLopt) SetFtolAbs(tol float64) error
- func (n *NLopt) SetFtolRel(tol float64) error
- func (n *NLopt) SetInitialStep(dx []float64) error
- func (n *NLopt) SetInitialStep1(dx float64) error
- func (n *NLopt) SetLocalOptimizer(localOpt *NLopt) error
- func (n *NLopt) SetLowerBounds(lb []float64) error
- func (n *NLopt) SetLowerBounds1(lb float64) error
- func (n *NLopt) SetMaxEval(maxeval int) error
- func (n *NLopt) SetMaxObjective(f Func) error
- func (n *NLopt) SetMaxTime(maxtime float64) error
- func (n *NLopt) SetMinObjective(f Func) error
- func (n *NLopt) SetParam(name string, val float64) error
- func (n *NLopt) SetPopulation(pop uint) error
- func (n *NLopt) SetStopVal(stopval float64) error
- func (n *NLopt) SetUpperBounds(ub []float64) error
- func (n *NLopt) SetUpperBounds1(ub float64) error
- func (n *NLopt) SetVectorStorage(M uint) error
- func (n *NLopt) SetXtolAbs(tol []float64) error
- func (n *NLopt) SetXtolAbs1(tol float64) error
- func (n *NLopt) SetXtolRel(tol float64) error
Constants ¶
const (
    // GN_DIRECT is DIRECT (global, no-derivative)
    GN_DIRECT = iota
    // GN_DIRECT_L is DIRECT-L (global, no-derivative)
    GN_DIRECT_L
    // GN_DIRECT_L_RAND is Randomized DIRECT-L (global, no-derivative)
    GN_DIRECT_L_RAND
    // GN_DIRECT_NOSCAL is Unscaled DIRECT (global, no-derivative)
    GN_DIRECT_NOSCAL
    // GN_DIRECT_L_NOSCAL is Unscaled DIRECT-L (global, no-derivative)
    GN_DIRECT_L_NOSCAL
    // GN_DIRECT_L_RAND_NOSCAL is Unscaled Randomized DIRECT-L (global, no-derivative)
    GN_DIRECT_L_RAND_NOSCAL
    // GN_ORIG_DIRECT is Original DIRECT version (global, no-derivative)
    GN_ORIG_DIRECT
    // GN_ORIG_DIRECT_L is Original DIRECT-L version (global, no-derivative)
    GN_ORIG_DIRECT_L
    // GD_STOGO is StoGO (NOT COMPILED)
    GD_STOGO
    // GD_STOGO_RAND is StoGO randomized (NOT COMPILED)
    GD_STOGO_RAND
    // LD_LBFGS_NOCEDAL is original L-BFGS code by Nocedal et al. (NOT COMPILED)
    LD_LBFGS_NOCEDAL
    // LD_LBFGS is Limited-memory BFGS (L-BFGS) (local, derivative-based)
    LD_LBFGS
    // LN_PRAXIS is Principal-axis, praxis (local, no-derivative)
    LN_PRAXIS
    // LD_VAR1 is Limited-memory variable-metric, rank 1 (local, derivative-based)
    LD_VAR1
    // LD_VAR2 is Limited-memory variable-metric, rank 2 (local, derivative-based)
    LD_VAR2
    // LD_TNEWTON is Truncated Newton (local, derivative-based)
    LD_TNEWTON
    // LD_TNEWTON_RESTART is Truncated Newton with restarting (local, derivative-based)
    LD_TNEWTON_RESTART
    // LD_TNEWTON_PRECOND is Preconditioned truncated Newton (local, derivative-based)
    LD_TNEWTON_PRECOND
    // LD_TNEWTON_PRECOND_RESTART is Preconditioned truncated Newton with restarting (local, derivative-based)
    LD_TNEWTON_PRECOND_RESTART
    // GN_CRS2_LM is Controlled random search (CRS2) with local mutation (global, no-derivative)
    GN_CRS2_LM
    // GN_MLSL is Multi-level single-linkage (MLSL), random (global, no-derivative)
    GN_MLSL
    // GD_MLSL is Multi-level single-linkage (MLSL), random (global, derivative)
    GD_MLSL
    // GN_MLSL_LDS is Multi-level single-linkage (MLSL), quasi-random (global, no-derivative)
    GN_MLSL_LDS
    // GD_MLSL_LDS is Multi-level single-linkage (MLSL), quasi-random (global, derivative)
    GD_MLSL_LDS
    // LD_MMA is Method of Moving Asymptotes (MMA) (local, derivative)
    LD_MMA
    // LN_COBYLA is COBYLA (Constrained Optimization BY Linear Approximations) (local, no-derivative)
    LN_COBYLA
    // LN_NEWUOA is NEWUOA unconstrained optimization via quadratic models (local, no-derivative)
    LN_NEWUOA
    // LN_NEWUOA_BOUND is Bound-constrained optimization via NEWUOA-based quadratic models (local, no-derivative)
    LN_NEWUOA_BOUND
    // LN_NELDERMEAD is Nelder-Mead simplex algorithm (local, no-derivative)
    LN_NELDERMEAD
    // LN_SBPLX is Sbplx variant of Nelder-Mead (re-implementation of Rowan's Subplex) (local, no-derivative)
    LN_SBPLX
    // LN_AUGLAG is Augmented Lagrangian method (local, no-derivative)
    LN_AUGLAG
    // LD_AUGLAG is Augmented Lagrangian method (local, derivative)
    LD_AUGLAG
    // LN_AUGLAG_EQ is Augmented Lagrangian method for equality constraints (local, no-derivative)
    LN_AUGLAG_EQ
    // LD_AUGLAG_EQ is Augmented Lagrangian method for equality constraints (local, derivative)
    LD_AUGLAG_EQ
    // LN_BOBYQA is BOBYQA bound-constrained optimization via quadratic models (local, no-derivative)
    LN_BOBYQA
    // GN_ISRES is ISRES evolutionary constrained optimization (global, no-derivative)
    GN_ISRES
    // AUGLAG is Augmented Lagrangian method (needs sub-algorithm)
    AUGLAG
    // AUGLAG_EQ is Augmented Lagrangian method for equality constraints (needs sub-algorithm)
    AUGLAG_EQ
    // G_MLSL is Multi-level single-linkage (MLSL), random (global, needs sub-algorithm)
    G_MLSL
    // G_MLSL_LDS is Multi-level single-linkage (MLSL), quasi-random (global, needs sub-algorithm)
    G_MLSL_LDS
    // LD_SLSQP is Sequential Quadratic Programming (SQP) (local, derivative)
    LD_SLSQP
    // LD_CCSAQ is CCSA (Conservative Convex Separable Approximations) with simple quadratic approximations (local, derivative)
    LD_CCSAQ
    // GN_ESCH is ESCH evolutionary strategy
    GN_ESCH
    // NUM_ALGORITHMS is number of algorithms
    NUM_ALGORITHMS
)
Variables ¶
This section is empty.
Functions ¶
func AlgorithmName ¶
func AlgorithmName(algorithm int) string
AlgorithmName returns a descriptive string corresponding to a particular algorithm `algorithm`.
func Srand ¶
func Srand(seed uint64)
Srand allows using a "deterministic" sequence of pseudorandom numbers, i.e. the same sequence from run to run. For stochastic optimization algorithms, pseudorandom numbers generated by the Mersenne Twister algorithm are used. By default, the seed for the random numbers is generated from the system time, so that you will get a different sequence of pseudorandom numbers each time you run your program.
Some of the algorithms also support using low-discrepancy sequences (LDS), sometimes known as quasi-random numbers. NLopt uses the Sobol LDS, which is implemented for up to 1111 dimensions.
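A minimal sketch of fixing the seed for reproducible runs of a stochastic algorithm; it assumes the package is imported as nlopt:

// Fix the seed so stochastic algorithms (e.g. GN_CRS2_LM, GN_ISRES)
// see the same pseudorandom sequence on every run.
nlopt.Srand(42)

// Revert to the default behaviour of seeding from the system time.
nlopt.SrandTime()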
Types ¶
type Func ¶
type Func func(x, gradient []float64) float64
Func is an objective function to minimize or maximize. The return value should be the value of the function at the point x, where x is a slice of length n of the optimization parameters. If the argument gradient is not <nil> or empty, it is a slice of length n which should (upon return) be set in-place to the gradient of the function with respect to the optimization parameters at x.
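An illustrative objective written as a Func (a hedged sketch, not part of the package): a smooth quadratic whose gradient is filled in place only when the algorithm passes a non-empty gradient slice.

// f(x) = (x[0]-1)^2 + (x[1]-2)^2, minimized at (1, 2).
var quadratic nlopt.Func = func(x, gradient []float64) float64 {
    if len(gradient) > 0 {
        gradient[0] = 2 * (x[0] - 1)
        gradient[1] = 2 * (x[1] - 2)
    }
    return (x[0]-1)*(x[0]-1) + (x[1]-2)*(x[1]-2)
}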
type Mfunc ¶
type Mfunc func(result, x, gradient []float64)
Mfunc is a vector-valued objective function for applications where it is more convenient to define a single function that returns the values (and gradients) of all constraints at once. Upon return, the output values of the constraints should be stored in result, a slice of length m (the same as the dimension passed to AddEqualityMConstraint or AddInequalityMConstraint). In addition, if gradient is not <nil>, then gradient is a slice of length m*n which should, upon return, be set to the gradients of the constraint functions with respect to x.
type NLopt ¶
type NLopt struct {
// contains filtered or unexported fields
}
NLopt wraps a C.nlopt_opt "object" (an opaque pointer) on which you set various optimization parameters and then execute the algorithm.
func NewNLopt ¶
NewNLopt returns a newly allocated nlopt_opt object given an algorithm and the dimensionality of the problem `n` (the number of optimization parameters)
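A complete, hedged example of the typical workflow: create an optimizer, set bounds, a stopping criterion and the objective, then run Optimize. The import path and the exact NewNLopt signature (algorithm constant plus dimension, returning (*NLopt, error)) are assumptions made for the sake of the sketch.

package main

import (
    "fmt"
    "log"

    "github.com/go-nlopt/nlopt" // assumed import path
)

func main() {
    // Two-dimensional problem solved with the derivative-free BOBYQA algorithm.
    opt, err := nlopt.NewNLopt(nlopt.LN_BOBYQA, 2)
    if err != nil {
        log.Fatal(err)
    }
    defer opt.Destroy()

    // Box constraints and a stopping criterion.
    if err := opt.SetLowerBounds([]float64{-10, -10}); err != nil {
        log.Fatal(err)
    }
    if err := opt.SetUpperBounds([]float64{10, 10}); err != nil {
        log.Fatal(err)
    }
    if err := opt.SetXtolRel(1e-8); err != nil {
        log.Fatal(err)
    }

    // Minimize f(x) = (x[0]-3)^2 + (x[1]+1)^2; no gradient is needed for BOBYQA.
    if err := opt.SetMinObjective(func(x, gradient []float64) float64 {
        return (x[0]-3)*(x[0]-3) + (x[1]+1)*(x[1]+1)
    }); err != nil {
        log.Fatal(err)
    }

    x, minf, err := opt.Optimize([]float64{0, 0})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("minimum %g at %v (%s)\n", minf, x, opt.LastStatus())
}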
func (*NLopt) AddEqualityConstraint ¶
AddEqualityConstraint adds an arbitrary nonlinear equality constraint h. This functionality is supported by the ISRES and AUGLAG algorithms. The parameter tol is a tolerance used for the purpose of stopping criteria only.
func (*NLopt) AddEqualityMConstraint ¶
AddEqualityMConstraint adds a vector-valued equality constraint h. tol is a slice of length m of the tolerances in each constraint dimension (or <nil> for zero tolerances).
func (*NLopt) AddInequalityConstraint ¶
AddInequalityConstraint adds an arbitrary nonlinear inequality constraint fc. This functionality is supported by the MMA, COBYLA and ORIG_DIRECT algorithms. The parameter tol is a tolerance used for the purpose of stopping criteria only.
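A hedged sketch of adding a scalar inequality constraint, assuming opt is an *NLopt created with a constraint-capable algorithm (for example LD_MMA or LN_COBYLA). NLopt expects constraints in the form fc(x) <= 0, so x[0] + x[1] >= 1 is written as 1 - x[0] - x[1] <= 0.

// Enforce x[0] + x[1] >= 1.
err := opt.AddInequalityConstraint(func(x, gradient []float64) float64 {
    if len(gradient) > 0 {
        gradient[0] = -1
        gradient[1] = -1
    }
    return 1 - x[0] - x[1]
}, 1e-8)
if err != nil {
    panic(err)
}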
func (*NLopt) AddInequalityMConstraint ¶
AddInequalityMConstraint adds a vector-valued inequality constraint fc. tol is a slice of length m of the tolerances in each constraint dimension (or <nil> for zero tolerances).
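A hedged sketch of a vector-valued constraint: two bound-style conditions x[0] <= 4 and x[1] <= 4 evaluated in one Mfunc, again assuming an existing *NLopt value opt. When gradient is non-empty it has length m*n, with the gradient of result[i] occupying gradient[i*n : (i+1)*n].

constraints := func(result, x, gradient []float64) {
    n := len(x)
    if len(gradient) > 0 {
        for i := range gradient {
            gradient[i] = 0
        }
        gradient[0*n+0] = 1 // d result[0] / d x[0]
        gradient[1*n+1] = 1 // d result[1] / d x[1]
    }
    result[0] = x[0] - 4 // x[0] <= 4
    result[1] = x[1] - 4 // x[1] <= 4
}
if err := opt.AddInequalityMConstraint(constraints, []float64{1e-8, 1e-8}); err != nil {
    panic(err)
}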
func (*NLopt) Destroy ¶
func (n *NLopt) Destroy()
Destroy deallocates the underlying nlopt_opt object and frees all reserved resources
func (*NLopt) ForceStop ¶
ForceStop allows the caller to force the optimization to halt, for some reason unknown to NLopt. This causes Optimize to halt, returning the FORCED_STOP error. It has no effect if not called during Optimize.
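A hedged sketch of halting a run from inside the objective once a hypothetical budget is exhausted; evals and the limit of 1000 are illustrative only.

evals := 0
_ = opt.SetMinObjective(func(x, gradient []float64) float64 {
    evals++
    if evals > 1000 {
        _ = opt.ForceStop() // Optimize will return a FORCED_STOP error
    }
    return x[0] * x[0]
})

In practice an evaluation budget is better expressed with SetMaxEval; ForceStop is meant for conditions NLopt cannot know about, such as user cancellation.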
func (*NLopt) GetAlgorithm ¶
GetAlgorithm returns an immutable algorithm id parameter for this instance
func (*NLopt) GetAlgorithmName ¶
GetAlgorithmName returns a descriptive immutable algorithm name for this instance
func (*NLopt) GetDimension ¶
GetDimension returns an immutable dimension parameter for this instance
func (*NLopt) GetForceStop ¶
GetForceStop retrieves the last forced-stop value that was set since the last Optimize. The force-stop value is reset to zero at the beginning of Optimize.
func (*NLopt) GetFtolAbs ¶
GetFtolAbs retrieves the current value for absolute function value tolerance criterion
func (*NLopt) GetFtolRel ¶
GetFtolRel retrieves the current value for relative function value tolerance criterion
func (*NLopt) GetInitialStep ¶
GetInitialStep retrieves the initial step size. The first slice is the same as the initial guess that you plan to pass to nlopt.NLopt.Optimize – if you have not set the initial step and NLopt is using its heuristics, its heuristic step size may depend on the initial x, which is why you must pass it here. Both slices are of length n (the dimension of the problem from nlopt.NewNLopt), and on successful return the second slice contains the initial step sizes.
func (*NLopt) GetLowerBounds ¶
GetLowerBounds returns lower bounds. It is possible not to have lower bounds set. The size of the returned slice is n (the dimension of the problem)
func (*NLopt) GetMaxEval ¶
GetMaxEval retrieves the current value for maxeval criterion
func (*NLopt) GetMaxTime ¶
GetMaxTime retrieves the current value for maxtime criterion
func (*NLopt) GetPopulation ¶
GetPopulation retrieves initial "population" of random points x
func (*NLopt) GetStopVal ¶
GetStopVal retrieves the current value for stopval criterion
func (*NLopt) GetUpperBounds ¶
GetUpperBounds returns upper bounds. It is possible not to have upper bounds set. The size of the returned slice is n (the dimension of the problem)
func (*NLopt) GetVectorStorage ¶
GetVectorStorage retrieves size of vector storage
func (*NLopt) GetXtolAbs ¶
GetXtolAbs retrieves the current value for absolute tolerances on optimization parameters criterion
func (*NLopt) GetXtolRel ¶
GetXtolRel retrieves the current value for relative tolerance on optimization parameters criterion
func (*NLopt) LastStatus ¶
func (*NLopt) Optimize ¶
Optimize performs the optimization once all of the desired optimization parameters have been specified in a given object. The input x is a slice of length n (the dimension of the problem from NewNLopt) giving an initial guess for the optimization parameters. On successful return, the first return value is a slice containing the optimized values of the parameters, and the second contains the corresponding value of the objective function.
func (*NLopt) RemoveEqualityConstraints ¶
RemoveEqualityConstraints removes all equality constraints
func (*NLopt) RemoveInequalityConstraints ¶
RemoveInequalityConstraints removes all inequality constraints
func (*NLopt) SetDefaultInitialStep ¶
SetDefaultInitialStep sets the initial step sizes using NLopt's heuristics, given an initial guess x that you plan to pass to Optimize.
func (*NLopt) SetForceStop ¶
SetForceStop sets a forced-stop integer value val, which can be later retrieved. Passing val=0 to SetForceStop tells NLopt not to force a halt.
func (*NLopt) SetFtolAbs ¶
SetFtolAbs sets absolute tolerance on function value: stop when an optimization step (or an estimate of the optimum) changes the function value by less than tol. Criterion is disabled if tol is non-positive.
func (*NLopt) SetFtolRel ¶
SetFtolRel sets relative tolerance on function value: stop when an optimization step (or an estimate of the optimum) changes the objective function value by less than tol multiplied by the absolute value of the function value. Criterion is disabled if tol is non-positive.
func (*NLopt) SetInitialStep ¶
SetInitialStep sets the initial step size to perturb x by when the optimizer begins the optimization, for derivative-free local-optimization algorithms. This step size should be big enough that the value of the objective changes significantly, but not too big if you want to find the local optimum nearest to x. By default, NLopt chooses this initial step size heuristically from the bounds, tolerances, and other information, but this may not always be the best choice. Parameter dx is a slice of length n (the dimension of the problem from NewNLopt) containing the (nonzero) initial step size for each component of the optimization parameters x. If you pass <nil> for dx, then NLopt will use its heuristics to determine the initial step size.
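A short hedged sketch, assuming an existing *NLopt value opt for a two-dimensional problem where the first parameter should be perturbed much more aggressively than the second:

if err := opt.SetInitialStep([]float64{0.5, 0.01}); err != nil {
    panic(err)
}

// Or use one step size for every direction (see SetInitialStep1 below).
if err := opt.SetInitialStep1(0.1); err != nil {
    panic(err)
}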
func (*NLopt) SetInitialStep1 ¶
SetInitialStep1 sets initial step size to perturb x by when optimizer begins the optimization for derivative-free local-optimization algorithms to the same value in every direction.
func (*NLopt) SetLocalOptimizer ¶
SetLocalOptimizer sets a different optimization algorithm as a subroutine for algorithms like MLSL and AUGLAG. Here localOpt is another nlopt.NLopt object whose parameters are used to determine the local search algorithm, its stopping criteria, and other algorithm parameters. (However, the objective function, bounds, and nonlinear-constraint parameters of localOpt are ignored.) The dimension of localOpt must match that of the primary optimizer.
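A hedged sketch of pairing a global MLSL search with an L-BFGS local refiner; the NewNLopt signature is assumed as in the earlier example under NewNLopt.

// Global quasi-random MLSL search over a 2-dimensional problem.
global, err := nlopt.NewNLopt(nlopt.G_MLSL_LDS, 2)
if err != nil {
    panic(err)
}
defer global.Destroy()

// Local derivative-based refiner; only its algorithm, stopping criteria
// and other algorithm parameters are used.
local, err := nlopt.NewNLopt(nlopt.LD_LBFGS, 2)
if err != nil {
    panic(err)
}
defer local.Destroy()
_ = local.SetXtolRel(1e-6)

if err := global.SetLocalOptimizer(local); err != nil {
    panic(err)
}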
func (*NLopt) SetLowerBounds ¶
SetLowerBounds sets lower bounds; the objective function and any nonlinear constraints will never be evaluated outside of these bounds. Bounds are set by passing a slice lb of length n (the dimension of the problem)
func (*NLopt) SetLowerBounds1 ¶
SetLowerBounds1 sets lower bounds to a single constant for all optimization parameters
func (*NLopt) SetMaxEval ¶
SetMaxEval sets a criterion to stop when the number of function evaluations exceeds maxeval. Criterion is disabled if maxeval is non-positive.
func (*NLopt) SetMaxObjective ¶
SetMaxObjective sets the objective function f to maximize
func (*NLopt) SetMaxTime ¶
SetMaxTime sets a criterion to stop when the optimization time (in seconds) exceeds maxtime. Criterion is disabled if maxtime is non-positive.
func (*NLopt) SetMinObjective ¶
SetMinObjective sets the objective function f to minimize
func (*NLopt) SetPopulation ¶
SetPopulation sets an initial "population" of random points x for several of the stochastic search algorithms (e.g., CRS, MLSL, and ISRES). By default, this initial population size is chosen heuristically in some algorithm-specific way. A pop of zero implies that the heuristic default will be used.
func (*NLopt) SetStopVal ¶
SetStopVal sets a criterion to stop when an objective value of at least stopval is found: stop minimizing when an objective value ≤ stopval is found, or stop maximizing when a value ≥ stopval is found.
func (*NLopt) SetUpperBounds ¶
SetUpperBounds sets upper bounds; the objective function and any nonlinear constraints will never be evaluated outside of these bounds. Bounds are set by passing a slice ub of length n (the dimension of the problem)
func (*NLopt) SetUpperBounds1 ¶
SetUpperBounds1 sets upper bounds to a single constant for all optimization parameters
func (*NLopt) SetVectorStorage ¶
SetVectorStorage sets the vector storage size M for the NLopt algorithms that are limited-memory "quasi-Newton" algorithms, which "remember" the gradients from a finite number M of the previous optimization steps in order to construct an approximate 2nd derivative matrix. The bigger M is, the more storage the algorithms require, but on the other hand they may converge faster for larger M. By default, NLopt chooses a heuristic value of M.
Passing M=0 (the default) tells NLopt to use a heuristic value. By default, NLopt currently sets M to 10 or at most 10 MiB worth of vectors, whichever is larger.
func (*NLopt) SetXtolAbs ¶
SetXtolAbs sets absolute tolerances on optimization parameters. tol is a slice of length n (the dimension from NewNLopt) giving the tolerances: stop when an optimization step (or an estimate of the optimum) changes every parameter x[i] by less than tol[i]
func (*NLopt) SetXtolAbs1 ¶
SetXtolAbs1 sets the absolute tolerances in all n optimization parameters to the same value tol. Criterion is disabled if tol is non-positive.
func (*NLopt) SetXtolRel ¶
SetXtolRel sets relative tolerance on optimization parameters: stop when an optimization step (or an estimate of the optimum) changes every parameter by less than tol multiplied by the absolute value of the parameter. Criterion is disabled if tol is non-positive.
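A final hedged sketch showing a typical combination of stopping criteria on an existing *NLopt value opt; whichever criterion triggers first ends the run, and non-positive values leave a criterion disabled.

_ = opt.SetFtolRel(1e-10) // relative change in the objective value
_ = opt.SetXtolRel(1e-8)  // relative change in the parameters
_ = opt.SetMaxEval(10000) // evaluation budget
_ = opt.SetMaxTime(30.0)  // wall-clock budget, in seconds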