diff options
-rw-r--r-- | R/estUtil.R | 28 |
1 file changed, 22 insertions, 6 deletions
diff --git a/R/estUtil.R b/R/estUtil.R
index fead00b..ba9f6d1 100644
--- a/R/estUtil.R
+++ b/R/estUtil.R
@@ -1,27 +1,31 @@
+# Implementation of the Levenberg Marquardt Algorithm
 levbmqdt <- function(y,u,n,reg,theta0,opt=optimOptions()){
   # Optimization Parameters
   tol <- opt$tol; maxIter <- opt$maxIter
   d <- opt$adv$LMinit; mu <- opt$adv$LMstep

-  N <- dim(y,1)
+  N <- dim(y,1); df <- N - length(theta0)

   # Initialize Algorithm
   i <- 0
   eout <- matrix(rep(0,N+2*n))
-  X <- t(sapply(n+1:(N+n),reg))
+  X <- t(sapply(n+1:(N+n),reg,y,u,e))
   Y <- yout[n+1:(N+n),,drop=F]
   e <- Y-X%*%theta0
   sumsq0 <- sum(e^2)

   # parameter indicating whether to update gradient in the iteration
   update <- 1
+  # variable to count the number of times objective function is called
+  countObj <- 0

   repeat{
+    i=i+1
     if(update ==1){
-      i=i+1
+      countObj <- countObj+1
       # Update gradient
       eout <- matrix(c(rep(0,n),e[,]))
-      X <- t(sapply(n+1:(N+n),reg))
+      X <- t(sapply(n+1:(N+n),reg,y,u,e))
       filt1 <- Arma(b=1,a=c(1,theta0[(na+nb+1:nc)]))
       grad <- apply(X,2,filter,filt=filt1)
     }
@@ -37,13 +41,13 @@ levbmqdt <- function(y,u,n,reg,theta0,opt=optimOptions()){

     # If sum square error with the updated parameters is less than the
     # previous one, the updated parameters become the current parameters
-    # and the damping factor is reduced by a factor of 10
+    # and the damping coefficient is reduced by a factor of mu
     if(sumsq<sumsq0){
       d <- d/mu
       theta0 <- theta
       sumsq0 <- sumsq
       update <- 1
-    } else{ # increase damping factor
+    } else{ # increase damping coefficient by a factor of mu
       d <- d*v
       update <- 0
     }
@@ -55,6 +59,18 @@ levbmqdt <- function(y,u,n,reg,theta0,opt=optimOptions()){
       break
   }

+  if(sumSqRatio < tol){
+    WhyStop <- "Tolerance"
+  } else{
+    WhyStop <- "Maximum Iteration Limit"
+  }
+
+  e <- e[1:N,]
+  sigma2 <- sum(e^2)/df
+  vcov <- sigma2 * Hinv
+
+  list(params=theta,residuals=e,vcov=vcov,sigma = sqrt(sigma2),
+       termination=list(WhyStop=WhyStop,iter=i,FcnCount = countObj))
 }

 #' @export
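For context, below is a minimal standalone R sketch of the two ideas this commit touches: the Levenberg-Marquardt damping rule (divide the damping coefficient d by mu when a step lowers the sum of squares, multiply it by mu otherwise) and the covariance estimate vcov = sigma2 * Hinv with sigma2 = sum(e^2)/df. It is not the package's levbmqdt: it uses a plain linear least-squares problem instead of the package's regressor/filter machinery, and the function name lm_sketch and the simulated data are hypothetical.

# Minimal LM sketch on a linear model (assumptions: simple quadratic
# objective, no regressor/filter step as in the package code).
lm_sketch <- function(X, y, theta0, d = 1e-3, mu = 10, tol = 1e-8, maxIter = 100) {
  theta <- theta0
  e <- y - X %*% theta
  sumsq0 <- sum(e^2)
  for (i in seq_len(maxIter)) {
    g <- t(X) %*% e                          # descent direction (negative gradient of 0.5*||e||^2)
    H <- t(X) %*% X + d * diag(ncol(X))      # damped (approximate) Hessian
    step <- solve(H, g)
    theta_new <- theta + step
    e_new <- y - X %*% theta_new
    sumsq <- sum(e_new^2)
    if (sumsq < sumsq0) {                    # step accepted: relax damping by a factor of mu
      d <- d / mu
      sumSqRatio <- abs(sumsq0 - sumsq) / sumsq0
      theta <- theta_new; e <- e_new; sumsq0 <- sumsq
      if (sumSqRatio < tol) break
    } else {                                 # step rejected: increase damping by a factor of mu
      d <- d * mu
    }
  }
  df <- nrow(X) - length(theta)              # residual degrees of freedom
  sigma2 <- sum(e^2) / df                    # noise variance estimate
  Hinv <- solve(t(X) %*% X)                  # inverse (undamped) Hessian
  list(params = theta, vcov = sigma2 * Hinv, sigma = sqrt(sigma2), iter = i)
}

# Hypothetical usage on simulated data:
set.seed(1)
X <- cbind(1, rnorm(100)); y <- X %*% c(2, -1) + rnorm(100, sd = 0.1)
lm_sketch(X, y, theta0 = c(0, 0))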