% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/foceiFit.R
\name{foceiControl}
\alias{foceiControl}
\title{Control Options for FOCEi}
\usage{
foceiControl(
sigdig = 3,
...,
epsilon = NULL,
maxInnerIterations = 1000,
maxOuterIterations = 5000,
n1qn1nsim = NULL,
method = c("liblsoda", "lsoda", "dop853"),
transitAbs = NULL,
atol = NULL,
rtol = NULL,
atolSens = NULL,
rtolSens = NULL,
ssAtol = NULL,
ssRtol = NULL,
ssAtolSens = NULL,
ssRtolSens = NULL,
minSS = 10L,
maxSS = 1000L,
maxstepsOde = 500000L,
hmin = 0L,
hmax = NA_real_,
hini = 0,
maxordn = 12L,
maxords = 5L,
cores,
covsInterpolation = c("locf", "linear", "nocb", "midpoint"),
print = 1L,
printNcol = floor((getOption("width") - 23)/12),
scaleTo = 1,
scaleObjective = 0,
normType = c("rescale2", "mean", "rescale", "std", "len", "constant"),
scaleType = c("nlmixr", "norm", "mult", "multAdd"),
scaleCmax = 1e+05,
scaleCmin = 1e-05,
scaleC = NULL,
scaleC0 = 1e+05,
derivEps = rep(20 * sqrt(.Machine$double.eps), 2),
derivMethod = c("switch", "forward", "central"),
derivSwitchTol = NULL,
covDerivMethod = c("central", "forward"),
covMethod = c("r,s", "r", "s", ""),
hessEps = (.Machine$double.eps)^(1/3),
eventFD = sqrt(.Machine$double.eps),
eventType = c("gill", "central", "forward"),
centralDerivEps = rep(20 * sqrt(.Machine$double.eps), 2),
lbfgsLmm = 7L,
lbfgsPgtol = 0,
lbfgsFactr = NULL,
eigen = TRUE,
addPosthoc = TRUE,
diagXform = c("sqrt", "log", "identity"),
sumProd = FALSE,
optExpression = TRUE,
ci = 0.95,
useColor = crayon::has_color(),
boundTol = NULL,
calcTables = TRUE,
noAbort = TRUE,
interaction = TRUE,
cholSEtol = (.Machine$double.eps)^(1/3),
cholAccept = 0.001,
resetEtaP = 0.15,
resetThetaP = 0.05,
resetThetaFinalP = 0.15,
diagOmegaBoundUpper = 5,
diagOmegaBoundLower = 100,
cholSEOpt = FALSE,
cholSECov = FALSE,
fo = FALSE,
covTryHarder = FALSE,
outerOpt = c("nlminb", "bobyqa", "lbfgsb3c", "L-BFGS-B", "mma", "lbfgsbLG", "slsqp",
"Rvmmin"),
innerOpt = c("n1qn1", "BFGS"),
rhobeg = 0.2,
rhoend = NULL,
npt = NULL,
rel.tol = NULL,
x.tol = NULL,
eval.max = 4000,
iter.max = 2000,
abstol = NULL,
reltol = NULL,
resetHessianAndEta = FALSE,
stateTrim = Inf,
gillK = 10L,
gillStep = 4,
gillFtol = 0,
gillRtol = sqrt(.Machine$double.eps),
gillKcov = 10L,
gillStepCov = 2,
gillFtolCov = 0,
rmatNorm = TRUE,
smatNorm = TRUE,
covGillF = TRUE,
optGillF = TRUE,
covSmall = 1e-05,
adjLik = TRUE,
gradTrim = Inf,
maxOdeRecalc = 5,
odeRecalcFactor = 10^(0.5),
gradCalcCentralSmall = 1e-04,
gradCalcCentralLarge = 10000,
etaNudge = qnorm(1 - 0.05/2)/sqrt(3),
etaNudge2 = qnorm(1 - 0.05/2) * sqrt(3/5),
stiff,
nRetries = 3,
seed = 42,
resetThetaCheckPer = 0.1,
etaMat = NULL,
repeatGillMax = 3,
stickyRecalcN = 5,
gradProgressOfvTime = 10,
addProp = c("combined2", "combined1"),
singleOde = TRUE,
badSolveObjfAdj = 100
)
}
\arguments{
\item{sigdig}{Optimization significant digits. This controls:
\itemize{
\item The tolerance of the inner and outer optimization is \code{10^-sigdig}
\item The tolerance of the ODE solvers is
\code{0.5*10^(-sigdig-2)}; For the sensitivity equations and
steady-state solutions the default is \code{0.5*10^(-sigdig-1.5)}
(sensitivity changes only applicable for liblsoda)
\item The tolerance of the boundary check is \code{5 * 10 ^ (-sigdig + 1)}
\item The significant figures that some tables are rounded to.
}}
\item{...}{Ignored parameters}
\item{epsilon}{Precision of estimate for n1qn1 optimization.}
\item{maxInnerIterations}{Number of iterations for n1qn1
optimization.}
\item{maxOuterIterations}{Maximum number of iterations for the
outer problem optimization (for example, L-BFGS-B).}
\item{n1qn1nsim}{Number of function evaluations for n1qn1
optimization.}
\item{method}{The method for solving ODEs. Currently this supports:
\itemize{
\item \code{"liblsoda"} thread safe lsoda. This supports parallel
thread-based solving, and ignores user Jacobian specification.
\item \code{"lsoda"} -- LSODA solver. Does not support parallel thread-based
solving, but allows user Jacobian specification.
\item \code{"dop853"} -- DOP853 solver. Does not support parallel thread-based
solving nor user Jacobain specification
\item \code{"indLin"} -- Solving through inductive linearization. The RxODE dll
must be setup specially to use this solving routine.
}}
\item{transitAbs}{boolean indicating if the model uses transit
compartment absorption}
\item{atol}{a numeric absolute tolerance (1e-8 by default) used
by the ODE solver to determine if a good solution has been
achieved; This is also used in the solved linear model to check
if prior doses do not add anything to the solution.}
\item{rtol}{a numeric relative tolerance (\code{1e-6} by default) used
by the ODE solver to determine if a good solution has been
achieved. This is also used in the solved linear model to check
if prior doses do not add anything to the solution.}
\item{atolSens}{Sensitivity atol, can be different than atol with
liblsoda. This allows a less accurate solve for gradients (if desired)}
\item{rtolSens}{Sensitivity rtol, can be different than rtol with
liblsoda. This allows a less accurate solve for gradients (if desired)}
\item{ssAtol}{Steady state absolute tolerance (atol) for calculating if steady-state
has been achieved.}
\item{ssRtol}{Steady state relative tolerance (rtol) for
calculating if steady-state has been achieved.}
\item{ssAtolSens}{Sensitivity absolute tolerance (atol) for
calculating if steady state has been achieved for sensitivity compartments.}
\item{ssRtolSens}{Sensitivity relative tolerance (rtol) for
calculating if steady state has been achieved for sensitivity compartments.}
\item{minSS}{Minimum number of iterations for a steady-state dose}
\item{maxSS}{Maximum number of iterations for a steady-state dose}
\item{maxstepsOde}{Maximum number of steps for ODE solver.}
\item{hmin}{The minimum absolute step size allowed. The default
value is 0.}
\item{hmax}{The maximum absolute step size allowed. When
\code{hmax=NA} (default), uses the average difference +
hmaxSd*sd in times and sampling events. The \code{hmaxSd} is a
user-specified parameter that defaults to zero. When
\code{hmax=NULL} RxODE uses the maximum difference in times in
your sampling and events. The value 0 is equivalent to infinite
maximum absolute step size.}
\item{hini}{The step size to be attempted on the first step. The
default value is determined by the solver (when \code{hini = 0})}
\item{maxordn}{The maximum order to be allowed for the nonstiff
(Adams) method. The default is 12. It can be between 1 and
12.}
\item{maxords}{The maximum order to be allowed for the stiff (BDF)
method. The default value is 5. This can be between 1 and 5.}
\item{cores}{Number of cores used in parallel ODE solving. This
is equivalent to calling \code{\link[RxODE:getRxThreads]{setRxThreads()}}}
\item{covsInterpolation}{specifies the interpolation method for
time-varying covariates. When solving ODEs it often samples
times outside the sampling time specified in \code{events}.
When this happens, the time varying covariates are
interpolated. Currently this can be:
\itemize{
\item \code{"linear"} interpolation, which interpolates the covariate
by solving the line between the observed covariates and extrapolating the new
covariate value.
\item \code{"constant"} -- Last observation carried forward (the default).
\item \code{"NOCB"} -- Next Observation Carried Backward. This is the same method
that NONMEM uses.
\item \code{"midpoint"} Last observation carried forward to midpoint; Next observation
carried backward to midpoint.
}}
\item{print}{Integer representing when the outer step is
printed. When this is 0, the iterations are not printed. When
this is 1 (default), every function evaluation is printed; when
this is 5, every 5th evaluation is printed.}
\item{printNcol}{Number of columns to printout before wrapping
parameter estimates/gradient}
\item{scaleTo}{Scale the initial parameter estimate to this value.
By default this is 1. When zero or below, no scaling is performed.}
\item{scaleObjective}{Scale the initial objective function to this
value. By default this is 0, so the objective function is not scaled.}
\item{normType}{This is the type of parameter
normalization/scaling used to get the scaled initial values
for nlmixr. These are used together with the \code{scaleType}
argument. With the exception of \code{rescale2}, these come
from
\href{https://en.wikipedia.org/wiki/Feature_scaling}{Feature
Scaling}. The \code{rescale2} scaling is the same type
described in the
\href{http://apmonitor.com/me575/uploads/Main/optimization_book.pdf}{OptdesX}
software manual.
In general, all of the scaling formulas can be described by:
v_{scaled} = (v_{unscaled} - C_{1})/C_{2}
where the constants C_{1} and C_{2} depend on the normalization type:
\itemize{
\item \code{rescale2} This scales all parameters from (-1 to 1).
The relative differences between the parameters are preserved
with this approach and the constants are:
C_{1} = (max(all unscaled values)+min(all unscaled values))/2
C_{2} = (max(all unscaled values) - min(all unscaled values))/2
\item \code{rescale} or min-max normalization. This rescales all
parameters from (0 to 1). As in the \code{rescale2} the
relative differences are preserved. In this approach:
C_{1} = min(all unscaled values)
C_{2} = max(all unscaled values) - min(all unscaled values)
\item \code{mean} or mean normalization. This rescales to center
the parameters around the mean but the parameters are from 0
to 1. In this approach:
C_{1} = mean(all unscaled values)
C_{2} = max(all unscaled values) - min(all unscaled values)
\item \code{std} or standardization. This standardizes by the mean
and standard deviation. In this approach:
C_{1} = mean(all unscaled values)
C_{2} = sd(all unscaled values)
\item \code{len} or unit length scaling. This scales the
parameters to the unit length. For this approach we use the Euclidean length, that
is:
C_{1} = 0
C_{2} = sqrt(v_1^2 + v_2^2 + ... + v_n^2)
\item \code{constant} which does not perform data normalization. That is
C_{1} = 0
C_{2} = 1
}}
\item{scaleType}{The scaling scheme for nlmixr. The supported types are:
\itemize{
\item \code{nlmixr} In this approach the scaling is performed by the following equation:
v_{scaled} = (v_{current} - v_{init})/scaleC[i] + scaleTo
The \code{scaleTo} parameter is specified by the \code{normType},
and the scales are specified by \code{scaleC}.
\item \code{norm} This approach uses the simple scaling provided
by the \code{normType} argument.
\item \code{mult} This approach does not use the data
normalization provided by \code{normType}, but rather uses
multiplicative scaling to a constant provided by the \code{scaleTo}
argument.
In this case:
v_{scaled} = v_{current}/v_{init}*scaleTo
\item \code{multAdd} This approach changes the scaling based on
the parameter being specified. If a parameter is defined in an
exponential block (ie exp(theta)), then it is scaled
linearly, that is:
v_{scaled} = (v_{current}-v_{init}) + scaleTo
Otherwise the parameter is scaled multiplicatively.
v_{scaled} = v_{current}/v_{init}*scaleTo
}}
\item{scaleCmax}{Maximum value of the scaleC to prevent overflow.}
\item{scaleCmin}{Minimum value of the scaleC to prevent underflow.}
\item{scaleC}{The scaling constant used with
\code{scaleType=nlmixr}. When not specified, it is based on
the type of parameter that is estimated. The idea is to keep
the derivatives similar on a log scale to have similar
gradient sizes. Hence parameters like log(exp(theta)) would
have a scaling factor of 1 and log(theta) would have a scaling
factor of ini_value (to scale by 1/value; ie
d/dt(log(ini_value)) = 1/ini_value or scaleC=ini_value)
\itemize{
\item For parameters in an exponential (ie exp(theta)) or
parameters specifying powers, boxCox or yeoJohnson
transformations, this is 1.
\item For additive, proportional, lognormal error structures,
these are given by 0.5*abs(initial_estimate)
\item Factorials are scaled by abs(1/digamma(initial_estimate+1))
\item parameters in a log scale (ie log(theta)) are transformed
by log(abs(initial_estimate))*abs(initial_estimate)
}
These parameter scaling coefficients are chosen to try to keep
similar slopes among parameters; that is, they all follow
approximately the same slopes on a log scale.
While these are chosen in a logical manner, they may not always
apply. You can specify each parameter's scaling factor with this
argument if you wish.}
\item{scaleC0}{Number to adjust the scaling factor by if the initial
gradient is zero.}
\item{derivEps}{Forward difference tolerances, which is a
vector of relative difference and absolute difference. The
central/forward difference step size h is calculated as:
\code{h = abs(x)*derivEps[1] + derivEps[2]}}
\item{derivMethod}{indicates the method for calculating
derivatives of the outer problem. Currently supports
"switch", "central" and "forward" difference methods. Switch
starts with forward differences. This will switch to central
differences when abs(delta(OFV)) <= derivSwitchTol and switch
back to forward differences when abs(delta(OFV)) >
derivSwitchTol.}
\item{derivSwitchTol}{The tolerance to switch forward to central
differences.}
\item{covDerivMethod}{indicates the method for calculating the
derivatives while calculating the covariance components
(Hessian and S).}
\item{covMethod}{Method for calculating covariance. In this
discussion, R is the Hessian matrix of the objective
function. The S matrix is the sum of individual
gradient cross-product (evaluated at the individual empirical
Bayes estimates).
\itemize{
\item "\code{r,s}" Uses the sandwich matrix to calculate the
covariance, that is: \code{solve(R) \%*\% S \%*\% solve(R)}
\item "\code{r}" Uses the Hessian matrix to calculate the
covariance as \code{2 \%*\% solve(R)}
\item "\code{s}" Uses the cross-product matrix to calculate the
covariance as \code{4 \%*\% solve(S)}
\item "" Does not calculate the covariance step.
}}
\item{hessEps}{is a double value representing the epsilon for the Hessian calculation.}
\item{eventFD}{Finite difference step for forward or central
difference estimation of event-based gradients}
\item{eventType}{Event gradient type for dosing events; Can be
"gill", "central" or "forward"}
\item{centralDerivEps}{Central difference tolerances. This is a
numeric vector of relative difference and absolute difference.
The central difference step size h is calculated as:
\code{h = abs(x)*centralDerivEps[1] + centralDerivEps[2]}}
\item{lbfgsLmm}{An integer giving the number of BFGS updates
retained in the "L-BFGS-B" method. It defaults to 7.}
\item{lbfgsPgtol}{is a double precision variable.
On entry pgtol >= 0 is specified by the user. The iteration
will stop when:
\code{max(\| proj g_i \| i = 1, ..., n) <= lbfgsPgtol}
where pg_i is the ith component of the projected gradient.
On exit pgtol is unchanged. This defaults to zero, which
suppresses the check.}
\item{lbfgsFactr}{Controls the convergence of the "L-BFGS-B"
method. Convergence occurs when the reduction in the
objective is within this factor of the machine
tolerance. Default is 1e10, which gives a tolerance of about
\code{2e-6}, approximately 4 sigdigs. You can check your
exact tolerance by multiplying this value by
\code{.Machine$double.eps}}
\item{eigen}{A boolean indicating if eigenvectors are calculated
to include a condition number calculation.}
\item{addPosthoc}{Boolean indicating if posthoc parameters are
added to the table output.}
\item{diagXform}{This is the transformation used on the diagonal
of the \code{chol(solve(omega))}. This matrix and values are the
parameters estimated in FOCEi. The possibilities are:
\itemize{
\item \code{sqrt} Estimates the sqrt of the diagonal elements of \code{chol(solve(omega))}. This is the default method.
\item \code{log} Estimates the log of the diagonal elements of \code{chol(solve(omega))}
\item \code{identity} Estimates the diagonal elements without any transformations
}}
\item{sumProd}{Is a boolean indicating if the model should change
multiplication to high precision multiplication and sums to
high precision sums using the PreciseSums package. By default
this is \code{FALSE}.}
\item{optExpression}{Optimize the RxODE expression to speed up
calculation. By default this is turned on.}
\item{ci}{Confidence level for some tables. By default this is
0.95 or 95\% confidence.}
\item{useColor}{Boolean indicating if focei can use ASCII color codes}
\item{boundTol}{Tolerance for boundary issues.}
\item{calcTables}{This boolean is to determine if the foceiFit
will calculate tables. By default this is \code{TRUE}}
\item{noAbort}{Boolean to indicate if the FOCEi evaluation should
not abort when it runs into trouble. (default TRUE)}
\item{interaction}{Boolean indicating if FOCEi should be used (TRUE)
instead of FOCE (FALSE)}
\item{cholSEtol}{tolerance for Generalized Cholesky
Decomposition. Defaults to suggested (.Machine$double.eps)^(1/3)}
\item{cholAccept}{Tolerance to accept a Generalized Cholesky
Decomposition for a R or S matrix.}
\item{resetEtaP}{represents the p-value for resetting the
individual ETA to 0 during optimization (instead of the saved
value). The two test statistics used in the z-test are either
chol(omega^-1) \%*\% eta or eta/sd(allEtas). A p-value of 0
indicates the ETAs never reset. A p-value of 1 indicates the
ETAs always reset.}
\item{resetThetaP}{represents the p-value for resetting the
population mu-referenced THETA parameters based on ETA drift
during optimization, and resetting the optimization. A
p-value of 0 indicates the THETAs never reset. A p-value of 1
indicates the THETAs always reset and is not allowed. The
theta reset is checked at the beginning and when nearing a
local minimum. The percent change in objective function where
a theta reset check is initiated is controlled in
\code{resetThetaCheckPer}.}
\item{resetThetaFinalP}{represents the p-value for resetting the
population mu-referenced THETA parameters based on ETA drift
during optimization, and resetting the optimization one final time.}
\item{diagOmegaBoundUpper}{This represents the upper bound of the
diagonal omega matrix. The upper bound is given by
diag(omega)*diagOmegaBoundUpper. If
\code{diagOmegaBoundUpper} is 1, there is no upper bound on
Omega.}
\item{diagOmegaBoundLower}{This represents the lower bound of the
diagonal omega matrix. The lower bound is given by
diag(omega)/diagOmegaBoundLower. If
\code{diagOmegaBoundLower} is 1, there is no lower bound on
Omega.}
\item{cholSEOpt}{Boolean indicating if the generalized Cholesky
should be used while optimizing.}
\item{cholSECov}{Boolean indicating if the generalized Cholesky
should be used while calculating the Covariance Matrix.}
\item{fo}{is a boolean indicating if this is a FO approximation routine.}
\item{covTryHarder}{If the R matrix is non-positive definite and
cannot be corrected to be positive definite, try estimating
the Hessian on the unscaled parameter space.}
\item{outerOpt}{optimization method for the outer problem}
\item{innerOpt}{optimization method for the inner problem (not
implemented yet.)}
\item{rhobeg}{Beginning change in parameters for the bobyqa algorithm
(trust region). By default this is 0.2, or 20\% of the initial
parameters when the parameters are scaled to 1. rhobeg and
rhoend must be set to the initial and final values of a trust
region radius, so both must be positive with 0 < rhoend <
rhobeg. Typically rhobeg should be about one tenth of the
greatest expected change to a variable. Note also that the
smallest difference abs(upper-lower) should be greater than or
equal to rhobeg*2. If this is not the case then rhobeg will be
adjusted.}
\item{rhoend}{The smallest value of the trust region radius that
is allowed. If not defined, then 10^(-sigdig-1) will be used.}
\item{npt}{The number of points used to approximate the objective
function via a quadratic approximation for bobyqa. The value
of npt must be in the interval [n+2,(n+1)(n+2)/2] where n is
the number of parameters in par. Choices that exceed 2*n+1 are
not recommended. If not defined, it will be set to 2*n + 1}
\item{rel.tol}{Relative tolerance before nlminb stops.}
\item{x.tol}{X tolerance for nlmixr optimizers}
\item{eval.max}{Number of maximum evaluations of the objective function}
\item{iter.max}{Maximum number of iterations allowed.}
\item{abstol}{Absolute tolerance for nlmixr optimizer}
\item{reltol}{tolerance for nlmixr}
\item{resetHessianAndEta}{is a boolean representing if the
individual Hessian is reset when ETAs are reset using the
option \code{resetEtaP}.}
\item{stateTrim}{Trim state amounts/concentrations to this value.}
\item{gillK}{The total number of possible steps used to determine the
optimal forward/central difference step size per parameter (by
the Gill 1983 method). If 0, no optimal step size is
determined; otherwise, up to this many steps are used to find
the optimal step size.}
\item{gillStep}{When looking for the optimal forward difference
step size, this is the factor by which the step size is
increased at each iteration, that is: new step size =
(prior step size)*gillStep}
\item{gillFtol}{The gillFtol is the gradient error tolerance that
is acceptable before issuing a warning/error about the gradient estimates.}
\item{gillRtol}{The relative tolerance used for Gill 1983
determination of optimal step size.}
\item{gillKcov}{The total number of possible steps used to determine
the optimal forward/central difference step size per parameter
(by the Gill 1983 method) during the covariance step. If 0,
no optimal step size is determined; otherwise, up to this many
steps are used to find the optimal step size.}
\item{gillStepCov}{When looking for the optimal forward difference
step size during the covariance step, this is the factor by
which the step size is increased at each iteration, that is:
new step size = (prior step size)*gillStepCov}
\item{gillFtolCov}{The gillFtol is the gradient error tolerance
that is acceptable before issuing a warning/error about the
gradient estimates during the covariance step.}
\item{rmatNorm}{A parameter to normalize gradient step size by the
parameter value during the calculation of the R matrix}
\item{smatNorm}{A parameter to normalize gradient step size by the
parameter value during the calculation of the S matrix}
\item{covGillF}{Use the Gill calculated optimal forward difference
step size instead of the central difference step size
during the central difference gradient calculation.}
\item{optGillF}{Use the Gill calculated optimal forward difference
step size instead of the central difference step size
during the central differences for optimization.}
\item{covSmall}{The covSmall is the small number to compare
covariance numbers before rejecting an estimate of the
covariance as the final estimate (when comparing sandwich vs
R/S matrix estimates of the covariance). This number controls
how small the variance is before the covariance matrix is
rejected.}
\item{adjLik}{In nlmixr, the objective function matches NONMEM's
objective function, which removes a 2*pi constant from the
likelihood calculation. If this is TRUE, the likelihood
function is adjusted by this 2*pi factor. When adjusted this
number more closely matches the likelihood approximations of
nlme and SAS approximations. Regardless of whether this is turned
on or off, the objective function matches NONMEM's objective
function.}
\item{gradTrim}{The parameter to adjust the gradient to if the
|gradient| is very large.}
\item{maxOdeRecalc}{Maximum number of times to reduce the ODE
tolerances and try to resolve the system if there was a bad
ODE solve.}
\item{odeRecalcFactor}{The factor to increase the rtol/atol with
bad ODE solving.}
\item{gradCalcCentralSmall}{A small number; when
|grad| < gradCalcCentralSmall, forward differences
switch to central differences.}
\item{gradCalcCentralLarge}{A large number; when
|grad| > gradCalcCentralLarge, forward differences
switch to central differences.}
\item{etaNudge}{By default initial ETA estimates start at zero;
Sometimes this doesn't optimize appropriately. If this value is
non-zero, when the n1qn1 optimization didn't perform
appropriately, reset the Hessian, and nudge the ETA up by this
value; If the ETA still doesn't move, nudge the ETA down by this
value. By default this value is qnorm(1-0.05/2)*1/sqrt(3), the
first of the Gauss quadrature numbers times the 95\% normal
region. If this is not successful, try the second eta nudge
number (below). If +-etaNudge2 is not successful, then assign
to zero and do not optimize any longer.}
\item{etaNudge2}{This is the second eta nudge. By default it is
qnorm(1-0.05/2)*sqrt(3/5), which is the n=3 quadrature point
(excluding zero) times the 95\% normal region.}
\item{stiff}{a logical (\code{TRUE} by default) indicating whether
the ODE system is stiff or not.
For stiff ODE systems (\code{stiff = TRUE}), \code{RxODE} uses the
LSODA (Livermore Solver for Ordinary Differential Equations)
Fortran package, which implements automatic method switching
between stiff and non-stiff problems along the integration
interval, authored by Hindmarsh and Petzold (2003).
For non-stiff systems (\code{stiff = FALSE}), \code{RxODE} uses
DOP853, an explicit Runge-Kutta method of order 8(5, 3) of
Dormand and Prince as implemented in C by Hairer and Wanner
(1993).
If \code{stiff} is not specified, the \code{method} argument is used instead.}
\item{nRetries}{If FOCEi doesn't fit with the current parameter
estimates, randomly sample new parameter estimates and restart
the problem. This is similar to 'PsN' resampling.}
\item{seed}{seed for random number generator}
\item{resetThetaCheckPer}{represents the objective function
percentage change below which resetThetaP is checked.}
\item{etaMat}{Eta matrix for initial estimates or final estimates
of the ETAs.}
\item{repeatGillMax}{If the tolerances were reduced when
calculating the initial Gill differences, the Gill difference
is repeated up to a maximum number of times defined by this
parameter.}
\item{stickyRecalcN}{The number of bad ODE solves before reducing
the atol/rtol for the rest of the problem.}
\item{gradProgressOfvTime}{This is the time for a single objective
function evaluation (in seconds) to start progress bars on gradient evaluations}
\item{addProp}{one of "combined1" and "combined2"; These are the
two forms of additive+proportional errors supported by
monolix/nonmem:
combined1: transform(y)=transform(f)+(a+b*f^c)*eps
combined2: transform(y)=transform(f)+(a^2+b^2*f^(2c))*eps}
\item{singleOde}{This option allows a single ODE model to include
the PK parameter information instead of splitting it into a
function and an RxODE model}
\item{badSolveObjfAdj}{The objective function adjustment when the
ODE system cannot be solved. It is based on each individual bad
solve.}
}
\value{
The control object that changes the options for the FOCEi
family of estimation methods
}
\description{
Control Options for FOCEi
}
\details{
Note this uses R's L-BFGS-B in \code{\link{optim}} for the
outer problem and the BFGS \code{\link[n1qn1]{n1qn1}}
optimization, which allows restoring the prior individual Hessian
(for faster optimization speed).
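
As a minimal sketch (not run during estimation, using only the
argument names documented above), the optimizers and their iteration
limits can be changed through this control object:
\preformatted{
## Hedged example: use bobyqa for the outer problem, cap the inner
## n1qn1 iterations, and let gradients switch between forward and
## central differences via derivMethod = "switch".
ctl <- foceiControl(
  outerOpt = "bobyqa",
  maxOuterIterations = 2000L,
  maxInnerIterations = 500L,
  derivMethod = "switch"
)
}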
However, the inner problem is not scaled. Since most eta estimates
start near zero, scaling these parameters does not make sense.
Scaling the outer problem can fix some of the ill conditioning of
the unscaled problem. The covariance step is performed on the
unscaled problem, so the condition number of that matrix may not
reflect the scaled problem's condition number.
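
For illustration, the \code{sigdig}-derived tolerances described
above can be reproduced with plain arithmetic. This is only a sketch
of the formulas listed under \code{sigdig}, not a call into the
package internals:
\preformatted{
sigdig    <- 3
opt_tol   <- 10^(-sigdig)             # inner/outer optimization tolerance
ode_tol   <- 0.5 * 10^(-sigdig - 2)   # default atol/rtol for the ODE solver
sens_tol  <- 0.5 * 10^(-sigdig - 1.5) # sensitivity/steady-state tolerance
bound_tol <- 5 * 10^(-sigdig + 1)     # boundary check tolerance
}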
}
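\examples{
\dontrun{
# Hedged usage sketches: these only construct control objects with the
# arguments documented above; they do not run an estimation.

# Default control (3 significant digits)
foceiControl()

# Tighter tolerances and a different ODE solver
foceiControl(sigdig = 4, method = "lsoda", maxOuterIterations = 10000L)

# Sandwich covariance with central-difference derivatives
foceiControl(covMethod = "r,s", covDerivMethod = "central")
}
}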
\seealso{
\code{\link{optim}}
\code{\link[n1qn1]{n1qn1}}
\code{\link[RxODE]{rxSolve}}
}
\author{
Matthew L. Fidler
}