
Some eigenvalues of the Hessian are positive, indicating convergence to a saddle point!

Posted: 03 Jan 2023, 13:27
by LianEnaLiu
Hi,

I have a problem estimating my LCCM. When I estimate a 4-class model, for example, I get the following warnings:

1. WARNING: Some eigenvalues of the Hessian are positive, indicating convergence to a saddle point!
2. Warning message: In sqrt(diag(varcov)) : NaNs produced
3. Some eigenvalues of Hessian are positive, indicating potential problems!

I have been trying to fix this for a long time now, but I cannot find the cause of the problem. Could someone please help?

My code is as follows:

rm(list = ls())

### Load Apollo library

library(apollo)

### Initialise code

apollo_initialise()


### Set core controls
apollo_control = list(
  modelName  = "LCCM",
  modelDescr = "LCCM",
  indivID    = "ID"
)

### Load data
database = read.delim("LCCMfinal.dat",sep=',',header=TRUE)

### Define model parameters, all with zero starting values
apollo_beta = c(BETA_env_a   = 0,
                BETA_env_b   = 0,
                BETA_env_c   = 0,
                BETA_env_d   = 0,
                BETA_nutri_a = 0,
                BETA_nutri_b = 0,
                BETA_nutri_c = 0,
                BETA_nutri_d = 0,
                BETA_price_a = 0,
                BETA_price_b = 0,
                BETA_price_c = 0,
                BETA_price_d = 0,
                BETA_taste_a = 0,
                BETA_taste_b = 0,
                BETA_taste_c = 0,
                BETA_taste_d = 0,
                BETA_tex_a   = 0,
                BETA_tex_b   = 0,
                BETA_tex_c   = 0,
                BETA_tex_d   = 0,
                BETA_app_a   = 0,
                BETA_app_b   = 0,
                BETA_app_c   = 0,
                BETA_app_d   = 0,
                BETA_AW_a    = 0,
                BETA_AW_b    = 0,
                BETA_AW_c    = 0,
                BETA_AW_d    = 0,
                delta_a      = 0,
                delta_b      = 0,
                delta_c      = 0,
                delta_d      = 0)

apollo_fixed = c()



### Defining Latent Class Parameters
apollo_lcPars = function(apollo_beta, apollo_inputs){
  lcpars = list()
  lcpars[["BETA_env"]]   = list(BETA_env_a, BETA_env_b, BETA_env_c, BETA_env_d)
  lcpars[["BETA_nutri"]] = list(BETA_nutri_a, BETA_nutri_b, BETA_nutri_c, BETA_nutri_d)
  lcpars[["BETA_price"]] = list(BETA_price_a, BETA_price_b, BETA_price_c, BETA_price_d)
  lcpars[["BETA_taste"]] = list(BETA_taste_a, BETA_taste_b, BETA_taste_c, BETA_taste_d)
  lcpars[["BETA_tex"]]   = list(BETA_tex_a, BETA_tex_b, BETA_tex_c, BETA_tex_d)
  lcpars[["BETA_app"]]   = list(BETA_app_a, BETA_app_b, BETA_app_c, BETA_app_d)
  lcpars[["BETA_AW"]]    = list(BETA_AW_a, BETA_AW_b, BETA_AW_c, BETA_AW_d)

  ### Utilities of the class-allocation model
  V = list()
  V[["class_a"]] = delta_a
  V[["class_b"]] = delta_b
  V[["class_c"]] = delta_c
  V[["class_d"]] = delta_d

  ### Settings for the class-allocation model
  mnl_settings = list(
    alternatives = c(class_a=1, class_b=2, class_c=3, class_d=4),
    avail        = 1,
    choiceVar    = NA,
    V            = V
  )
  lcpars[["pi_values"]] = apollo_mnl(mnl_settings, functionality = "raw")
  lcpars[["pi_values"]] = apollo_firstRow(lcpars[["pi_values"]], apollo_inputs)

  return(lcpars)
}



### VALIDATING AND PREPARING INPUTS
apollo_inputs = apollo_validateInputs()



### Model definition
apollo_probabilities = function(apollo_beta, apollo_inputs, functionality="estimate"){

  ### Attach inputs and detach after function exit
  apollo_attach(apollo_beta, apollo_inputs)
  on.exit(apollo_detach(apollo_beta, apollo_inputs))

  ### Create list of probabilities P
  P = list()

  ### Define settings for the MNL model component that are generic across classes
  mnl_settings = list(
    alternatives = c(alt1=1, alt2=2, alt3=3),
    avail        = list(alt1=1, alt2=1, alt3=1),
    choiceVar    = value
  )

  ### Loop over classes
  for(s in 1:4){

    ### Compute class-specific utilities
    V = list()
    V[["alt1"]] = env1 * BETA_env[[s]] + nutri1 * BETA_nutri[[s]] + price1 * BETA_price[[s]] +
                  taste1 * BETA_taste[[s]] + tex1 * BETA_tex[[s]] + app1 * BETA_app[[s]] + AW1 * BETA_AW[[s]]
    V[["alt2"]] = env2 * BETA_env[[s]] + nutri2 * BETA_nutri[[s]] + price2 * BETA_price[[s]] +
                  taste2 * BETA_taste[[s]] + tex2 * BETA_tex[[s]] + app2 * BETA_app[[s]] + AW2 * BETA_AW[[s]]
    V[["alt3"]] = env3 * BETA_env[[s]] + nutri3 * BETA_nutri[[s]] + price3 * BETA_price[[s]] +
                  taste3 * BETA_taste[[s]] + tex3 * BETA_tex[[s]] + app3 * BETA_app[[s]] + AW3 * BETA_AW[[s]]

    mnl_settings$V             = V
    mnl_settings$componentName = paste0("Class_", s)

    ### Compute within-class choice probabilities using the MNL model
    P[[paste0("Class_", s)]] = apollo_mnl(mnl_settings, functionality)

    ### Take the product across observations for the same individual
    P[[paste0("Class_", s)]] = apollo_panelProd(P[[paste0("Class_", s)]], apollo_inputs, functionality)

  }

  ### Compute latent class model probabilities
  lc_settings  = list(inClassProb = P, classProb = pi_values)
  P[["model"]] = apollo_lc(lc_settings, apollo_inputs, functionality)

  ### Prepare and return outputs of function
  P = apollo_prepareProb(P, apollo_inputs, functionality)
  return(P)
}

#### MODEL ESTIMATION
model = apollo_estimate(apollo_beta, apollo_fixed,
                        apollo_probabilities, apollo_inputs)


#### MODEL OUTPUTS
apollo_modelOutput(model, modelOutput_settings = list(printPVal = TRUE))

apollo_saveOutput(model)

My output is clearly off as well. I have added an attachment with a copy of the output.
Attachment: faulty output.png (86.78 KiB)

Re: Some eigenvalues of the Hessian are positive, indicating convergence to a saddle point!

Posted: 02 Feb 2023, 12:58
by stephanehess
Hi

Your model is overspecified: you cannot estimate an intercept for every class, so you need to fix one of the delta terms to zero.
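
One way to do this, assuming class d is kept as the reference (any one class would work), is to leave delta_d at its starting value of zero in apollo_beta and declare it as fixed, so it is not estimated:

### Keep delta_d = 0 in apollo_beta and fix it, making class d the reference class
apollo_fixed = c("delta_d")

The estimated delta_a, delta_b and delta_c are then interpreted relative to class d.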

Stephane