# Appendix A.1. Code Used to Perform Simulation Study
###############################################################################
### ###
### ###
### Excerpt of code written in Program R to evaluate effects of ###
### alternative numerical optimization methods on the precision ###
### of maximum likelihood statistical population reconstruction ###
### ###
###############################################################################
###############################################################################
############################## Set Up Work Space ##############################
###############################################################################
# Clear global environment and import required packages.
# NOTE(review): rm(list = ls()) wipes the user's workspace; tolerable in a
# standalone appendix script, but avoid it in reusable code.
{
rm(list=ls())
# library() (unlike require()) stops immediately when a package is missing,
# rather than returning FALSE and deferring the error to first use.
library(doSNOW) # For multi-core processing
library(truncnorm); library(numDeriv); library(pso); library(BB)
}
###############################################################################
######################### Define Multi-Core Simulation ########################
###############################################################################
# Run one complete simulate-and-reconstruct experiment for a single worker:
# simulate a harvested population with known parameters, reconstruct it with
# four optimizers (PSO, SPG, Nelder-Mead, BFGS), and score each method by
# relative absolute deviation (RAD) and AIC. Repeats until all four
# optimizers converge; returns a 4-row data frame (one row per algorithm).
# All shared state (Y, A, f0, h, f, c, s, n, scaleFactor, flags) lives in
# this function's environment so the nested functions can see it; designed
# to run inside a foreach() worker with no arguments.
simulateReconstruction <- function() {
# Define Monte Carlo simulation
# NOTE(review): `par` is never used; the simulator reads the stochastic
# truth (c, s, n, f0, Y, A) from the enclosing environment.
simulatePopulation <- function(par) {
## Initialize matrix to record simulated harvest counts
## (30 establishment/burn-in years precede the Y reconstruction years)
{
h <- matrix(0, nrow = 30 + Y, ncol = A)
}
## Expand effort estimates for establishment and reconstruction years
## (bootstrap resample of f0 for the burn-in, a permutation of f0 after)
{
f <- c(f0[sample(1:Y, size = 30, replace = TRUE)],
f0[sample(1:Y, size = Y, replace = FALSE)])
f <- matrix(rep(f, A), ncol = A)
}
## Define initial (diagonal) cohort values (n split evenly across ages)
{
N <- matrix(NA, nrow = 30 + Y, ncol = A)
N[1, 1:A] = round(n / 3)
}
## Define vulnerability and survival values
## NOTE(review): rebinding `c` shadows base::c inside this function, and
## ncol = 3 hard-codes A == 3.
{
c <- matrix(c, nrow = 30 + Y, ncol = 3)
s <- matrix(s, nrow = 30 + Y, ncol = 3)
}
## Define probability of harvest (catch-effort relationship)
{
P <- (1 - exp(-c * f))
}
## Define population sizes and harvest counts for simulated years
{
for (i in 1:(30 + Y - 1)) {
### Simulate harvest (independent binomial draw per age class)
{
h[i,] <- rbinom(rep(1, A), N[i,] , P[i,])
}
### Simulate survival
{
for (j in 1:(A - 2)) {
N[i+1, j+1] <- rbinom(1, N[i, j] - h[i, j], s[i, j])
}
### Terminal age class pools survivors from the last two classes
for (j in (A - 1)) {
N[i+1, j+1] <- rbinom(1, N[i, j] - h[i, j] , s[i, j] ) +
rbinom(1, N[i, j + 1] - h[i, j + 1], s[i, j + 1])
}
### Recruitment roughly replaces losses, jittered by runif(0.80, 1.25)
N[i+1, 1] <- round((sum(N[i, 1:A]) - sum(N[i + 1, 2 : A])) *
runif(1, min = 0.80, max = 1.25))
}
}
## Simulate harvest and index survey during the final year
{
h[(30 + Y), ] <- rbinom(rep(1, A), N[(30 + Y), ], P[(30 + Y), ])
}
}
# Trim establishment years from the simulated data
{
N <- N[-c(1:30), ]
h <- h[-c(1:30), ]
f = f[-c(1:30), ]
}
# Return population simulated values
{
return(list(N = N, h = h, f = f))
}
}
# Define likelihood variant of the objective function: the negative joint
# log-likelihood of the age-at-harvest and catch-effort data, or a distinct
# large penalty code (9000001-9000009) when a constraint is violated.
# Behavior is steered by the flags returnPopulationAbundance and
# returnChiSquareCorrection from the enclosing environment.
objectiveFunction <- function(par) {
## Import initial (diagonal) cohort values
## par layout: [1:A] first-year ages, [(A+1):(A+Y-1)] later recruitments,
## then one vulnerability and one survival coefficient (both scaled).
{
N <- matrix(NA, nrow = Y, ncol = A)
N[1, 1:A] <- par[1:A]
N[2:Y, 1] <- par[(A + 1):(A + Y - 1)]
}
## Import vulnerability and survival estimates (a single shared
## coefficient replicated across the A age classes; divided by
## scaleFactor so all parameters share a comparable numeric scale)
{
C <- matrix(par[Y + (A - 1) + c(1, 1, 1)] / scaleFactor,
nrow = Y, ncol = A, byrow = TRUE)
S <- matrix(par[Y + (A - 1) + c(2, 2, 2)] / scaleFactor,
nrow = Y, ncol = A, byrow = TRUE)
}
## Define probability of harvest
{
P <- (1 - exp(-C * f))
}
## Define expected population sizes (deterministic cohort projection)
{
for (i in 1:(Y - 1)) {
for (j in 1:(A - 2)) {
N[i + 1, j + 1] <- N[i, j] * (1 - P[i, j]) * S[i, j]
}
for (j in (A - 1)) {
N[i + 1, j + 1] <- N[i, j] * (1 - P[i, j]) * S[i, j] +
N[i, j + 1] * (1 - P[i, j + 1]) * S[i, j + 1]
}
}
}
# Return population size estimate (if requested)
{
if (returnPopulationAbundance) return (N)
}
# Perform initial test of model boundaries
# (distinct penalty codes identify which constraint failed)
{
if (any(N < h)) return (9000009)
if (any(C < limitForC[1],
C > limitForC[2])) return (9000008)
if (any(S < limitForS[1], S > limitForS[2])) return (9000007)
if (any(P + (1 - S) > 0.500)) return (9000006)
}
# Define expected harvest values
{
E <- N * P
}
## Return chi-square estimate for inflation factor if requested
{
if (returnChiSquareCorrection) {
return (sum((h - E) ^ 2 / E))
}
}
## Calculate contributions of each likelihood component
{
### Initialize the age-at-harvest and catch-effort components
{
logL_AAH <- matrix(0, nrow = Y, ncol = A)
logL_CAE <- matrix(0, nrow = Y, ncol = A)
}
### Contribution of the age-at-harvest component (cohort multinomials;
### the sum(log(...)) terms are log binomial/multinomial coefficients)
{
logL_AAH[1, A] <- sum(log((N[1,A] + 1 - 1:h[1,A]) / 1:h[1,A])) +
(h[1,A]) * (log( P[1,A])) +
(N[1,A] - h[1,A]) * (log(1 - P[1,A]))
logL_AAH[1, (A - 1)] <- sum(log((h[1,2] + h[2,3] + 1 - 1:h[1,2]) /
1:h[1,2])) +
h[1,2] * log(E[1,2] / (E[1,2] + E[2,3])) +
h[2,3] * log(E[2,3] / (E[1,2] + E[2,3]))
for (i in 1 : (Y - 2)) {
logL_AAH[i, 1] <- sum(log((h[i,1] + h[i+1,2] + h[i+2,3] +
1 - 1:h[i,1]) / 1:h[i,1])) +
sum(log((h[i+1,2] + h[i+2,3] +
1 - 1:h[i+1,2]) / 1:h[i+1,2])) +
h[i+0,1] * log(E[i+0,1] / (E[i,1] + E[i+1,2] + E[i+2,3])) +
h[i+1,2] * log(E[i+1,2] / (E[i,1] + E[i+1,2] + E[i+2,3])) +
h[i+2,3] * log(E[i+2,3] / (E[i,1] + E[i+1,2] + E[i+2,3]))
}
logL_AAH[(Y - 1), 1] <- sum(log((N[Y-1,1] +
1 - 1:h[Y-1,1]) / 1:h[Y-1,1])) +
sum(log((N[Y-1,1] - h[Y-1,1] +
1 - 1:h[Y,2]) / 1:h[Y,2])) +
(h[Y-1,1]) * log(( P[Y-1,1])) +
(h[Y,2]) * log((1 - P[Y-1,1]) *
S[1] * P[Y,2]) +
(N[Y-1,1] - h[Y-1,1] - h[Y,2]) * log((1 - P[Y-1,1]) *
(1 - S[1] * P[Y,2]))
logL_AAH[Y, 1] <- sum(log((N[Y,1] + 1 - 1:h[Y,1]) / 1:h[Y,1])) +
(h[Y,1]) * (log( P[Y,1])) +
(N[Y,1] - h[Y,1]) * (log(1 - P[Y,1]))
}
### Contribution of the catch-effort component (yearly binomials)
{
for (i in 1:Y) {
for (j in 1:A) {
logL_CAE[i,j] <- sum(log((sum(N[i,j]) + 1 - 1:sum(h[i,j])) /
1:sum(h[i,j]))) +
sum(h[i,j]) * (log( P[i,j])) +
sum(N[i,j] - h[i,j]) * (log(1 - P[i,j]))
}
}
}
}
## Return value of objective function if valid
{
logLikelihood <- -c(logL_AAH, logL_CAE)
if (any(is.na(logLikelihood))) return (9000003) else
if (any(logLikelihood == -Inf)) return (9000002) else
if (any(logLikelihood == Inf)) return (9000001) else
return(sum(logLikelihood))
}
}
# Define model parameterizations
{
## Import estimates of yearly catch-effort
{
f0 <- c(1.1552360, 0.9585951, 1.1134204, 0.8088820, 0.9529701, 1.0108965)
}
## Define number of years and age classes
{
Y <- length(f0)
A <- 3
}
## Set reasonable limits for survival and harvest vulnerability
{
limitForC <- c(0.05, 0.50)
limitForS <- c(0.50, 0.95)
}
}
# Repeat process until valid results are produced
repeat{
## Define numerical optimization methods to compare
{
algorithms <- c("PSO", "SPG", "Nelder-Mead", "BFGS")
}
## Initialize data frame to store temporary results
{
result <- data.frame(VLN = NA, SRV = NA, ABN = NA, HRV = NA,
ALG = algorithms, RAD = NA, AIC = NA)
}
## Generate and record simulated data; reject simulations whose yearly
## abundance leaves the plausible range (5,000-15,000 animals)
{
repeat{
c <- runif(1, 0.100, 0.250)
s <- runif(1, 0.750, 0.900)
n <- runif(1, 05000, 15000)
simulatedData <- simulatePopulation()
simulatedN <- simulatedData$N
if (min(rowSums(simulatedN)) > 05000 &
max(rowSums(simulatedN)) < 15000) {
break
}
}
h <- simulatedData$h
f <- simulatedData$f
}
## Initialize starting values for parameterization
{
scaleFactor <- 1e5
# FIX(review): this vector was previously assigned to `alValues`, but
# every optimizer call below reads `par = initialValues`, which was
# undefined on a fresh worker. Renamed so the intended starting values
# are actually used.
initialValues <- c(rep(c(mean(h) * 10), Y + (A - 1)),
rep(c(0.10, 0.70), each = 1) * scaleFactor)
}
## Cycle through options for objective functions
for (k in seq_along(algorithms)) {
### Reconstruct population abundance
{
returnPopulationAbundance <- FALSE
returnChiSquareCorrection <- FALSE
# NOTE(review): startingTime is recorded but never read afterwards.
startingTime <- Sys.time()
if (k == 1) {
optimized <- psoptim(par = initialValues,
fn = objectiveFunction,
lower = c(rep(c(00000), A + Y - 1),
rep(limitForC[1] * scaleFactor, 1),
rep(limitForS[1] * scaleFactor, 1)),
upper = c(rep(c(20000), A + Y - 1),
rep(limitForC[2] * scaleFactor, 1),
rep(limitForS[2] * scaleFactor, 1)),
control = list(maxit = 1000, trace = FALSE,
hybrid = "improved",
vectorize = TRUE,
type = "SPSO2011"))
}
if (k == 2) {
optimized <- spg(par = initialValues,
fn = objectiveFunction,
lower = c(rep(c(00000), A + Y - 1),
rep(limitForC[1] * scaleFactor, 1),
rep(limitForS[1] * scaleFactor, 1)),
upper = c(rep(c(20000), A + Y - 1),
rep(limitForC[2] * scaleFactor, 1),
rep(limitForS[2] * scaleFactor, 1)),
control = list(maxit = 1e7, trace = FALSE))
}
if (k == 3) {
optimized <- optim(par = initialValues,
fn = objectiveFunction,
method = "Nelder-Mead",
control = list(maxit = 1e7, trace = FALSE))
}
if (k == 4) {
optimized <- optim(par = initialValues,
fn = objectiveFunction,
method = "BFGS",
control = list(maxit = 1e7, trace = FALSE))
}
returnPopulationAbundance <- TRUE
estimatedN <- objectiveFunction(optimized$par)
returnPopulationAbundance <- FALSE
}
### Check for algorithmic convergence before proceeding
### (penalty codes are all >= 9000001, so < 9e6 means a real likelihood)
if (optimized$value < 9e6) {
#### Record simulated values
{
result$VLN <- c
result$SRV <- s
result$ABN <- mean(rowSums(simulatedN))
result$HRV <- mean(rowSums(h))
}
#### Record optimized objective function values
#### NOTE(review): Appendix A.2 computes AIC as 2 * value + 2k; here
#### the deviance term is not doubled. Within-simulation ranking of
#### algorithms is unaffected, but confirm which form is intended.
{
result$AIC[k] <- optimized$value + 2 * (A + Y - 1 + 2)
}
#### Evaluate reconstruction precision (mean relative abs. deviation)
{
result$RAD[k] <- 1 / (Y) * sum(abs(rowSums(estimatedN) -
rowSums(simulatedN)) /
rowSums(simulatedN))
}
}
}
## Check for validity of results (every algorithm must have converged)
if (!(any(is.na(result$RAD)))) {
break
}
}
# Return validated results
return(result)
}
###############################################################################
############# Perform Numerical Optimization of Simulated Results #############
###############################################################################
# Initialize multi-core processing
# NOTE(review): core <- 10 assumes at least 10 local CPUs — adjust per host.
{
maxIterations <- 1000
core <- 10
cl <- makeCluster(core, type = "SOCK")
registerDoSNOW(cl)
}
# Initialize data frame to store final results
# (the all-NA seed row is dropped later by complete.cases())
{
finalResults <- data.frame(VLN = NA, SRV = NA, ABN = NA, HRV = NA,
ALG = NA, RAD = NA, AIC = NA)
}
# Cycle through sufficient number of simulations
{
for (counter in 1:ceiling(maxIterations / core)) {
## Simulate reconstruction: one replicate per worker; workers that error
## are silently dropped via .errorhandling = "remove"
{
results <- foreach(i = 1:core, .combine = rbind,
.packages = c("readxl","pso","BB",
"numDeriv","truncnorm"),
.errorhandling = "remove") %dopar% (
simulateReconstruction())
}
## Append simulated results to data frame
{
finalResults <- rbind(finalResults, as.data.frame(results))
finalResults <- finalResults[complete.cases(finalResults), ]
}
## Update and export partial results (progress message + CSV checkpoint,
## so a crashed run can be resumed from the last written file)
{
print(paste(round(100 * nrow(finalResults) / maxIterations /
(nrow(results) / core), 2),
"% at ", Sys.time(), sep = ""))
write.csv(finalResults, "Simulated Results for IN Otters MK1v1.csv",
row.names = FALSE)
}
## Visualize partial results (RAD by optimization algorithm)
{
boxplot(RAD ~ ALG, data = finalResults, outline = F)
}
}
}
# Terminate multi-core processing
{
stopCluster(cl)
}
# Appendix A.2. Code Used to Perform Case Study
###############################################################################
### ###
### ###
### Excerpt of code written in Program R to perform statistical ###
### population reconstruction of North American river otters in ###
### the state of Indiana using 4 different optimization methods ###
### ###
###############################################################################
###############################################################################
############################## Set Up Work Space ##############################
###############################################################################
# Clear global environment, import required packages, and set working directory
# NOTE(review): rm(list = ls()) and the hard-coded personal setwd() path are
# acceptable only in a one-off appendix script; the path must be edited to
# run elsewhere.
{
rm(list=ls())
# library() (unlike require()) stops immediately if a package is missing.
library(BB)
library(pso)
library(readxl)
library(ggplot2)
library(numDeriv)
library(truncnorm)
setwd("~/Dropbox/Published Papers/Active - SPR of Otter in IN/Analysis")
}
###############################################################################
############################# Import Harvest Data #############################
###############################################################################
# Import three-tier age-at-harvest matrix
{
## Import values as a matrix with each year as a row
## (rows = harvest years 2015..2020; columns = the three age classes)
## FIX(review): byrow = TRUE instead of byrow = T — T is an ordinary,
## reassignable variable and should not be relied on.
h <- matrix(c(242, 181, 180,
158, 208, 152,
232, 179, 86,
142, 241, 204,
127, 281, 206,
174, 260, 182), byrow = TRUE, nrow = 6, ncol = 3)
}
# Import estimates of yearly catch-effort (one value per harvest year)
{
f <- c(1.1552360, 0.9585951, 1.1134204, 0.8088820, 0.9529701, 1.0108965)
}
## Define number of years and age classes (derived from h so they stay
## consistent with the data)
{
Y <- nrow(h)
A <- ncol(h)
yearRange <- c(2015, 2016, 2017, 2018, 2019, 2020)
}
# Set reasonable limits for survival and harvest vulnerability
# (vulnerability coefficient C in [0.05, 0.50]; annual survival S in
# [0.50, 0.95])
{
limitForC <- c(0.05, 0.50)
limitForS <- c(0.50, 0.95)
}
###############################################################################
########################## Define Objective Function ##########################
###############################################################################
# Define objective function using a multinomial likelihood formulation.
# Returns the negative joint log-likelihood of the age-at-harvest and
# catch-effort data, or a distinct penalty code (9000001-9000009) when a
# constraint fails. Behavior is steered by four global variables:
#   vulnerability / survivability (1, 2, or 3): how many distinct C / S
#     coefficients the model uses across the A age classes
#   returnPopulationAbundance: if TRUE, return the projected N matrix
#   returnChiSquareCorrection: if TRUE, return the chi-square statistic
# par layout: [1:A] first-year ages, [(A+1):(A+Y-1)] later recruitments,
# then A vulnerability slots and A survival slots (scaled by scaleFactor).
objectiveFunction <- function(par) {
## Import initial (diagonal) cohort values
{
N <- matrix(NA, nrow = Y, ncol = A)
N[1, 1:A] <- par[1:A]
N[2:Y, 1] <- par[(A + 1):(A + Y - 1)]
}
## Import vulnerability and survival estimates. The index vectors map the
## 1-, 2-, or 3-coefficient delineations onto the A age classes (e.g.
## c(1, 2, 2) shares one coefficient between the two oldest classes).
## Division by scaleFactor undoes the scaling applied to initialValues.
{
if (vulnerability == 1) {
C <- matrix(par[Y + (A - 1) + c(1, 1, 1)] / scaleFactor,
nrow = Y, ncol = A, byrow = T)
}
if (vulnerability == 2) {
C <- matrix(par[Y + (A - 1) + c(1, 2, 2)] / scaleFactor,
nrow = Y, ncol = A, byrow = T)
}
if (vulnerability == 3) {
C <- matrix(par[Y + (A - 1) + c(1, 2, 3)] / scaleFactor,
nrow = Y, ncol = A, byrow = T)
}
if (survivability == 1) {
S <- matrix(par[Y + (A - 1) + c(4, 4, 4)] / scaleFactor,
nrow = Y, ncol = A, byrow = T)
}
if (survivability == 2) {
S <- matrix(par[Y + (A - 1) + c(4, 5, 5)] / scaleFactor,
nrow = Y, ncol = A, byrow = T)
}
if (survivability == 3) {
S <- matrix(par[Y + (A - 1) + c(4, 5, 6)] / scaleFactor,
nrow = Y, ncol = A, byrow = T)
}
}
## Define probability of harvest (catch-effort relationship)
{
P <- (1 - exp(-C * f))
}
## Define expected population sizes (deterministic cohort projection;
## the terminal age class pools survivors of the last two classes)
{
for (i in 1:(Y - 1)) {
for (j in 1:(A - 2)) {
N[i + 1, j + 1] <- N[i, j] * (1 - P[i, j]) * S[i, j]
}
for (j in (A - 1)) {
N[i + 1, j + 1] <- N[i, j] * (1 - P[i, j]) * S[i, j] +
N[i, j + 1] * (1 - P[i, j + 1]) * S[i, j + 1]
}
}
}
# Return population size estimate (if requested)
{
if (returnPopulationAbundance) return (N)
}
# Perform initial test of model boundaries
# (distinct penalty codes identify which constraint failed)
{
if (any(N < h)) return (9000009)
if (any(C < limitForC[1],
C > limitForC[2])) return (9000008)
if (any(S < limitForS[1],
S > limitForS[2])) return (9000007)
if (any(P + (1 - S) > 0.500)) return (9000006)
}
# Define expected harvest values
{
E <- N * P
}
## Return chi-square estimate for inflation factor if requested
{
if (returnChiSquareCorrection) {
return (sum((h - E) ^ 2 / E))
}
}
## Calculate contributions of each likelihood component
{
### Initialize the age-at-harvest and catch-effort components
{
logL_AAH <- matrix(0, nrow = Y, ncol = A)
logL_CAE <- matrix(0, nrow = Y, ncol = A)
}
### Contribution of the age-at-harvest component (cohort multinomials;
### the sum(log(...)) terms are log binomial/multinomial coefficients).
### NOTE(review): several literal 2/3 indices hard-code A == 3.
{
logL_AAH[1, A] <- sum(log((N[1,3] + 1 - 1:h[1,3]) / 1:h[1,3])) +
h[1,3] * (log( P[1,A])) +
(N[1,3] - h[1,3]) * (log(1 - P[1,A]))
logL_AAH[1, (A - 1)] <- sum(log((h[1,2] + h[2,3] + 1 - 1:h[1,2]) /
1:h[1,2])) +
h[1,2] * log(E[1,2] / (E[1,2] + E[2,3])) +
h[2,3] * log(E[2,3] / (E[1,2] + E[2,3]))
for (i in 1 : (Y - 2)) {
logL_AAH[i, 1] <- sum(log((h[i,1] + h[i+1,2] + h[i+2,3] +
1 - 1:h[i,1]) / 1:h[i,1])) +
sum(log((h[i+1,2] + h[i+2,3] +
1 - 1:h[i+1,2]) / 1:h[i+1,2])) +
h[i+0,1] * log(E[i+0,1] / (E[i,1] + E[i+1,2] + E[i+2,3])) +
h[i+1,2] * log(E[i+1,2] / (E[i,1] + E[i+1,2] + E[i+2,3])) +
h[i+2,3] * log(E[i+2,3] / (E[i,1] + E[i+1,2] + E[i+2,3]))
}
logL_AAH[(Y - 1), 1] <- sum(log((N[Y-1,1] +
1 - 1:h[Y-1,1]) / 1:h[Y-1,1])) +
sum(log((N[Y-1,1] - h[Y-1,1] +
1 - 1:h[Y,2]) / 1:h[Y,2])) +
(h[Y-1,1]) * log(( P[Y-1,1])) +
(h[Y,2]) * log((1 - P[Y-1,1]) *
S[1] * P[Y,2]) +
(N[Y-1,1] - h[Y-1,1] - h[Y,2]) * log((1 - P[Y-1,1]) *
(1 - S[1] * P[Y,2]))
logL_AAH[Y, 1] <- sum(log((N[Y,1] + 1 - 1:h[Y,1]) / 1:h[Y,1])) +
(h[Y,1]) * (log( P[Y,1])) +
(N[Y,1] - h[Y,1]) * (log(1 - P[Y,1]))
}
### Contribution of the catch-effort component (yearly binomials)
{
for (i in 1:Y) {
for (j in 1:A) {
logL_CAE[i,j] <- sum(log((sum(N[i,j]) + 1 - 1:sum(h[i,j])) /
1:sum(h[i,j]))) +
sum(h[i,j]) * (log( P[i,j])) +
sum(N[i,j] - h[i,j]) * (log(1 - P[i,j]))
}
}
}
}
## Return value of objective function if valid (penalty codes distinguish
## NA, -Inf, and +Inf contributions for debugging)
{
logLikelihood <- -c(logL_AAH, logL_CAE)
if (any(is.na(logLikelihood))) return (9000003) else
if (any(logLikelihood == -Inf)) return (9000002) else
if (any(logLikelihood == Inf)) return (9000001) else
return(sum(logLikelihood))
}
}
###############################################################################
############### Perform NUMERICAL OPTIMIZATION of Observed Data ###############
###############################################################################
# Initialize data frames to store results
# (4 rows: one per vulnerability/survival model; columns are the first-year
# age-class sizes, the Y - 1 recruitments, the C and S coefficients, and the
# optimized objective value VAL)
{
pointEstimatesPSO <- cbind(data.frame(N11 = rep(NA, 4), N12 = NA, N13 = NA),
data.frame(matrix(NA, nrow = 4, ncol = (Y - 1))),
data.frame(C1 = rep(NA, 4), C2 = NA, C3 = NA,
S1 = NA, S2 = NA, S3 = NA, VAL = NA))
populationSizePSO <- data.frame(matrix(NA, nrow = 4, ncol = Y))
pointEstimatesSPG <- pointEstimatesPSO
pointEstimatesNLM <- pointEstimatesPSO
pointEstimatesBFG <- pointEstimatesPSO
populationSizeSPG <- populationSizePSO
populationSizeNLM <- populationSizePSO
populationSizeBFG <- populationSizePSO
}
# Initialize starting values for parameterization
# (abundances start at 10x the mean harvest count; C starts at 0.10 and S at
# 0.70, each replicated A times and multiplied by scaleFactor so every
# parameter is on a comparable numeric scale for the optimizers)
{
scaleFactor <- 1e5
initialValues <- c(rep(c(mean(h) * 10), Y + (A - 1)),
rep(c(0.10, 0.70), each = A) * scaleFactor)
}
# Optimize parameter space using four different numerical optimization methods
{
## Initialize model counter (row index into the pointEstimates* frames)
{
modelCount <- 1
}
## Cycle through possible vulnerability and survival delineations
## (1 = one shared coefficient, 3 = one per age class; 2 x 2 = 4 models,
## read as globals by objectiveFunction)
for (vulnerability in c(1,3)) {
for (survivability in c(1,3)) {
### Optimize objective function using particle swarm optimization
### (box-constrained; hybrid = "improved" adds local L-BFGS-B polish)
{
returnPopulationAbundance <- FALSE
returnChiSquareCorrection <- FALSE
optimized <- psoptim(par = initialValues,
fn = objectiveFunction,
lower = c(rep(c(00000), A + Y - 1),
rep(limitForC[1] * scaleFactor, A),
rep(limitForS[1] * scaleFactor, A)),
upper = c(rep(c(20000), A + Y - 1),
rep(limitForC[2] * scaleFactor, A),
rep(limitForS[2] * scaleFactor, A)),
control = list(maxit = 10000, trace = F,
hybrid = "improved",
vectorize = TRUE,
type = "SPSO2011"))
returnPopulationAbundance <- TRUE
estimatedN <- objectiveFunction(optimized$par)
pointEstimatesPSO[modelCount, ] <- c(optimized$par,
optimized$value)
populationSizePSO[modelCount, ] <- rowSums(estimatedN)
returnPopulationAbundance <- FALSE
}
### Optimize objective function using a spectral projected gradient
{
returnPopulationAbundance <- FALSE
returnChiSquareCorrection <- FALSE
optimized <- spg(par = initialValues, fn = objectiveFunction,
lower = c(rep(c(00000), A + Y - 1),
rep(limitForC[1] * scaleFactor, A),
rep(limitForS[1] * scaleFactor, A)),
upper = c(rep(c(20000), A + Y - 1),
rep(limitForC[2] * scaleFactor, A),
rep(limitForS[2] * scaleFactor, A)),
control = list(maxit = 1e7, trace = F))
returnPopulationAbundance <- TRUE
estimatedN <- objectiveFunction(optimized$par)
pointEstimatesSPG[modelCount, ] <- c(optimized$par,
optimized$value)
populationSizeSPG[modelCount, ] <- rowSums(estimatedN)
returnPopulationAbundance <- FALSE
}
### Optimize objective function using the Nelder–Mead method
### (optim's default; unconstrained — bounds enforced only through the
### penalty codes inside objectiveFunction)
{
returnPopulationAbundance <- FALSE
returnChiSquareCorrection <- FALSE
optimized <- optim(par = initialValues, fn = objectiveFunction,
control = list(maxit = 1e7))
returnPopulationAbundance <- TRUE
estimatedN <- objectiveFunction(optimized$par)
pointEstimatesNLM[modelCount, ] <- c(optimized$par,
optimized$value)
populationSizeNLM[modelCount, ] <- rowSums(estimatedN)
returnPopulationAbundance <- FALSE
}
### Optimize objective function using the BFGS method (unconstrained)
{
returnPopulationAbundance <- FALSE
returnChiSquareCorrection <- FALSE
optimized <- optim(par = initialValues, fn = objectiveFunction,
method = "BFGS", control = list(maxit = 1e7))
returnPopulationAbundance <- TRUE
estimatedN <- objectiveFunction(optimized$par)
pointEstimatesBFG[modelCount, ] <- c(optimized$par,
optimized$value)
populationSizeBFG[modelCount, ] <- rowSums(estimatedN)
returnPopulationAbundance <- FALSE
}
### Increment model counter
{
modelCount <- modelCount + 1
}
}
}
## Compute AIC for each reconstruction estimate
## (parameter counts: A + Y - 1 abundances plus 2, 4, 4, or 6 coefficients
## for the C1/S1, C1/S3, C3/S1, and C3/S3 models, in loop order)
{
pointEstimatesPSO$AIC <- 2 * pointEstimatesPSO$VAL +
2 * ((A + Y - 1) + c(2, 4, 4, 6))
pointEstimatesSPG$AIC <- 2 * pointEstimatesSPG$VAL +
2 * ((A + Y - 1) + c(2, 4, 4, 6))
pointEstimatesNLM$AIC <- 2 * pointEstimatesNLM$VAL +
2 * ((A + Y - 1) + c(2, 4, 4, 6))
pointEstimatesBFG$AIC <- 2 * pointEstimatesBFG$VAL +
2 * ((A + Y - 1) + c(2, 4, 4, 6))
}
}
# Extract uncertainty estimates for best-fit model
{
## Construct data frame to compile subsequent results
## (Y rows of abundance stacked over Y rows of recruitment)
{
bestFitModel <- data.frame(Year = yearRange,
Estimate = NA,
Lo95CI = NA, Hi95CI = NA,
Measure = rep(c("Abundance",
"Recruitment"),
each = length(yearRange)))
}
## Calculate standard errors for best-fit model parameters.
## NOTE(review): row 4 is the vulnerability = 3 / survivability = 3 model;
## this presumes that model is the AIC-best — confirm against the AIC
## table before reuse. SEs come from the inverse (absolute) Hessian and
## are then inflated by a chi-square overdispersion correction.
{
bestParameters <- as.numeric(pointEstimatesPSO[4, 1:(A + Y - 1 + A + A)])
vulnerability <- 3
survivability <- 3
returnPopulationAbundance <- FALSE
hessianMatrix <- abs(hessian(x = bestParameters,
func = objectiveFunction))
standardErrors <- sqrt(abs(diag(solve(hessianMatrix))))
returnChiSquareCorrection <- TRUE
chiSquare <- objectiveFunction(bestParameters)
returnChiSquareCorrection <- FALSE
standardErrors <- standardErrors * sqrt(chiSquare / length(bestParameters))
}
## Calculate stochastic abundance estimates using standard errors
## (parametric bootstrap: 1000 truncated-normal draws of the parameter
## vector, each projected through the population model)
{
abundanceProjection <- matrix(NA, nrow = 1000, ncol = Y)
recruiterProjection <- matrix(NA, nrow = 1000, ncol = Y)
tempParameters <- bestParameters
returnPopulationAbundance <- TRUE
for (i in 1:nrow(abundanceProjection)) {
# Abundance/recruitment parameters: truncated below at zero
for (j in 1:(A + Y - 1)) {
tempParameters[j] <- rtruncnorm(1, a = 0,
mean = bestParameters[j],
sd = standardErrors[j])
}
# C and S coefficients: additionally truncated above at scaleFactor
for (j in (A + Y - 1 + 1:(2 * A))) {
tempParameters[j] <- rtruncnorm(1, a = 0, b = scaleFactor,
mean = bestParameters[j],
sd = standardErrors[j])
}
returnPopulationAbundance <- TRUE
abundanceProjection[i,] <- rowSums(objectiveFunction(tempParameters))
recruiterProjection[i,] <- (objectiveFunction(tempParameters))[,1]
}
}
## Compute confidence intervals (percentile 2.5% / 97.5% bounds per year;
## recruitment point estimates are N[1,1] plus the Y - 1 recruitments)
{
bestFitModel$Estimate[bestFitModel$Measure == "Abundance"] <-
as.numeric(populationSizePSO[4,])
bestFitModel$Lo95CI[bestFitModel$Measure == "Abundance"] <-
apply(abundanceProjection, 2, quantile, prob = 0.025)
bestFitModel$Hi95CI[bestFitModel$Measure == "Abundance"] <-
apply(abundanceProjection, 2, quantile, prob = 0.975)
bestFitModel$Estimate[bestFitModel$Measure == "Recruitment"] <-
as.numeric(pointEstimatesPSO[4,c(1, (A - 1) + c(2:Y))])
bestFitModel$Lo95CI[bestFitModel$Measure == "Recruitment"] <-
apply(recruiterProjection, 2, quantile, prob = 0.025)
bestFitModel$Hi95CI[bestFitModel$Measure == "Recruitment"] <-
apply(recruiterProjection, 2, quantile, prob = 0.975)
}
}
###############################################################################
############### Construct VISUALIZATION Best-Fit Reconstruction ###############
###############################################################################
# Generate figure of abundance and recruitment estimates
# (95% CI ribbons with point/path overlays, colored by Measure)
# NOTE(review): geom_path(size = ...) is deprecated since ggplot2 3.4 in
# favor of linewidth =; retained here for compatibility with older ggplot2.
{
ggplot(aes(y = Estimate, x = Year, fill = Measure),
data = bestFitModel) +
geom_ribbon(aes(ymin = Lo95CI, ymax = Hi95CI,
fill = Measure), alpha = 0.25) +
geom_point(aes(colour = Measure)) +
geom_path(aes(colour = Measure, linetype = Measure), size = 1) +
scale_x_continuous(breaks = seq(min(yearRange), max(yearRange), 1)) +
scale_fill_manual(values = c("#F8766D", "#00BA38")) +
scale_color_manual(values = c("#F8766D", "#00BA38"))
}
###############################################################################
############### Synthesize ANALYSIS of Reconstruction Estimates ###############
###############################################################################
# Compile summary statistics under the likelihood objective function
{
# FIX(review): the geometric annual growth rate over Y - 1 intervals must
# use the final-year abundance. The original indexed element [A] (year 3
# when A = 3) while exponentiating by 1/(Y - 1), mixing a 2-interval ratio
# with a 5-interval exponent; [Y] is the consistent index.
lambda <- ((bestFitModel$Estimate[bestFitModel$Measure == "Abundance"])[Y] /
(bestFitModel$Estimate[bestFitModel$Measure == "Abundance"])[1]) ^
(1 / (Y - 1))
print(paste(" BEST-FIT MODEL USING PARTICLE SWARM OPTIMIZATION "))
print(paste("------------------------------------------------------------"))
print(paste("The model detected", vulnerability,
"separate vulnerability coefficient(s):", sep = " "))
print(paste(round(bestParameters[(A + Y - 1 + 1:A)] / scaleFactor, 3),
" (SD = ",
round(standardErrors[(A + Y - 1 + 1:A)] / scaleFactor, 3),
")",
sep = ""))
print(paste("These corresponded to harvest rates between ",
round((1 - exp(-mean(bestParameters[(A + Y - 1 + 1:A)] /
scaleFactor) * min(f))) * 100, 1),
"% and ",
round((1 - exp(-mean(bestParameters[(A + Y - 1 + 1:A)] /
scaleFactor) * max(f))) * 100, 1),
"%", sep = ""))
print(paste("The model detected", survivability,
"separate survival coefficient(s):", sep = " "))
print(paste(round(bestParameters[(A + Y - 1 + A + 1:A)] / scaleFactor, 3),
" (SD = ",
round(standardErrors[(A + Y - 1 + A + 1:A)] / scaleFactor, 3),
")",
sep = ""))
# NOTE(review): lambda is a multiplicative rate, so lambda < 1 means
# decline (the printed wording calls this a "negative growth rate");
# lambda exactly 1 falls into the second branch.
if (lambda > 1) {
print(paste("The model detected a positive annual growth rate of",
round(lambda, 3), sep = " "))
} else {
print(paste("The model detected a negative annual growth rate of",
round(lambda, 3), sep = " "))
}
print(paste("------------------------------------------------------------"))
}