################## Practice Exam # 1 (Code) ##################
FMX_da <- read.csv("http://www.bauer.uh.edu/rsusmel/4397/FX_USA_MX.csv", header=TRUE, sep=",")
summary(FMX_da)

x_years <- FMX_da$Years
us_CPI <- FMX_da$US_CPI
us_M1 <- FMX_da$US_M1
us_i <- FMX_da$US_int
us_GDP <- FMX_da$US_GDP
us_CA <- FMX_da$US_CA
mx_CPI <- FMX_da$MX_CPI
mx_M1 <- FMX_da$MX_M1
mx_i <- FMX_da$MX_int
mx_GDP <- FMX_da$MX_GDP
mx_CA <- FMX_da$MX_CA
S_mx <- FMX_da$MXN_USD

T <- length(us_CPI)
us_I <- log(us_CPI[-1]/us_CPI[-T])    # US Inflation
us_y <- log(us_GDP[-1]/us_GDP[-T])    # US growth rate
us_ca <- us_CA[-1]/us_CA[-T] - 1      # US CA % change
us_mg <- log(us_M1[-1]/us_M1[-T])     # US Money supply growth rate
mx_I <- log(mx_CPI[-1]/mx_CPI[-T])    # Mexican Inflation
mx_y <- log(mx_GDP[-1]/mx_GDP[-T])    # Mexican growth rate
mx_ca <- mx_CA[-1]/mx_CA[-T] - 1      # Mexican CA % change
mx_mg <- log(mx_M1[-1]/mx_M1[-T])     # Mexican Money supply growth rate
e_mx <- log(S_mx[-1]/S_mx[-T])        # Log changes in the exchange rate, MX Pesos (MXN) per USD (MXN/USD)
us_i_1 <- us_i[-1]/100                # US interest rate (at time t)
mx_i_1 <- mx_i[-1]/100                # MX interest rate (at time t)
mx_i_0 <- mx_i[-T]/100                # Lagged MX interest rate (at time t-1)
T_new <- length(mx_i_1)

###### Q1 ######
x <- us_y

### Q1.a. Histogram
n_breaks <- 40
h <- hist(x, breaks=n_breaks, col="red", xlab="% US Growth Rate",
          main="Distribution - Histogram for US growth")

### Q1.b. Moments for us_y
T <- length(x)                        # sample size of us_y (one observation lost to log changes)
m1 <- sum(x)/T                        ## Mean
m1
m2 <- sum((x-m1)^2)/(T-1)             ## Var
sd <- sqrt(m2)                        ## SD
sd
m3 <- sum((x-m1)^3)/T                 ## For numerator of S
m4 <- sum((x-m1)^4)/T                 ## For numerator of K
b1 <- m3/m2^(3/2)                     ## Sample Skewness
b1
b2 <- (m4/m2^2)                       ## Sample Kurtosis
b2

### Q1.c. Test for Normality of us_y
JB <- (b1^2 + (b2-3)^2/4)*T/6
JB
qchisq(.95, df = 2)                   # chi-squared (df=2) value at 5% level
p_val <- 1 - pchisq(JB, df = 2)       # p-value of LM test
p_val

### Q1.d. Test for mean(us_y) = 0
t_us_y <- m1/(sd/sqrt(T))
t_us_y

### Q1.e. 95% C.I. for SD(us_y) - Using asymptotic distribution (CLT)
SE_s <- sd/sqrt(2*(T-1))
us_y_sd_ci <- c(sd - 1.96*SE_s, sd + 1.96*SE_s)
us_y_sd_ci

# 95% C.I. for SD(us_y) - Using a bootstrap
library(boot)
sim_size <- 1000
# function to obtain the SD from the data
sd_p <- function(data, i) {
  d <- data[i]
  return(sd(d))
}
boot.samps <- boot(data=x, statistic=sd_p, R=sim_size)   # resampling and theta* estimation
# Basic bootstrap C.I. using boot (type = "perc" would give the percentile method)
boot.ci(boot.samps, type = "basic")

### Q1.f. Test if annual mean = 3%
t_us_y_a <- (m1*4 - .03)/(sd * sqrt(4)/sqrt(T/4))   # annualized mean (x4) and SD (x sqrt(4)); T/4 annual observations
t_us_y_a

###### Q2 ######
### Size 100
sim_size <- 100
dat_i <- data.frame(mx_I, mx_i_1)
library(boot)
# function to obtain cor from the data
cor_xy <- function(data, i) {
  d <- data[i, ]
  return(cor(d$mx_I, d$mx_i_1))
}
# bootstrapping with sim_size replications
boot.samps <- boot(data=dat_i, statistic=cor_xy, R=sim_size)

### Q2.a. View stored bootstrap samples and compute mean
boot.samps
mean(boot.samps$t)   # our estimate of the correlation
sd(boot.samps$t)     # SD of the correlation estimate

### Q2.b. Size 1,000
sim_size <- 1000
# bootstrapping with sim_size replications
boot.samps <- boot(data=dat_i, statistic=cor_xy, R=sim_size)
# view stored bootstrap samples and compute mean
boot.samps
mean(boot.samps$t)   # our estimate of the correlation
sd(boot.samps$t)     # SD of the correlation estimate

### Q2.c. 95% C.I. using percentile method
boot.ci(boot.samps, type="perc")
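## (EXTRA) A sketch of the percentile C.I. computed by hand from the stored bootstrap
## replicates in boot.samps$t (assumes the boot.samps object from Q2.b is still in memory).
## boot.ci(type="perc") interpolates the quantiles slightly differently, so the two
## intervals should be close but need not match exactly.
quantile(boot.samps$t, probs = c(0.025, 0.975))   # empirical 2.5% and 97.5% quantiles of theta*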
### Q2.d. 95% C.I. for the correlation between mx_I and mx_i_1, using its sampling
### distribution and assuming bivariate normality for the data (mx_I, mx_i_1)
T_n <- T - 1                              # sample size
m_cor <- cor(mx_I, mx_i_1)
sd_cor <- sqrt(1 - m_cor^2)/sqrt(T_new - 2)
cor_lb <- m_cor - 1.96 * sd_cor
cor_lb
cor_ub <- m_cor + 1.96 * sd_cor
cor_ub

### Q2.e. Real Interest Rates in MX - H0: Mean = 0
m_disc <- mx_i_1 - mx_I
m1_ri_mx <- mean(m_disc)
sd_ri_mx <- sd(m_disc)
t_e_ri_mx <- m1_ri_mx/(sd_ri_mx/sqrt(T_new))
t_e_ri_mx
pval <- 1 - pnorm(t_e_ri_mx)              # one-sided p-value (Ha: mean > 0)
pval

###### Q3 ######
## Regression
fit_i <- lm(mx_i_1 ~ us_i_1 + e_mx + mx_y + mx_ca)   # Model (US int, changes in S, MX GDP growth, MX CA % changes)

### Q3.a. Report Regression
summary(fit_i)

### Q3.b. Interpret US_int coefficient and t-value
### Q3.c. Drivers (significant variables)
### Q3.d. Interpret R^2 and report F-goodness-of-fit test

### Q3.e. Test if beta_1 = 1
b_i <- fit_i$coefficients                 # Extract OLS coefficients from the lm fit
SE_i <- sqrt(diag(vcov(fit_i)))           # SE from fit_i (also a kx1 vector)
t_beta1 <- (b_i[2] - 1)/SE_i[2]           # t-stat for H0: beta_1 = 1
p_val_beta1 <- (1 - pnorm(abs(t_beta1))) * 2   # p-value for t_beta1
p_val_beta1

### Q3.f. F-test for beta_2 = beta_4 = 0
# using library lmtest
library(lmtest)
fit_i_r <- lm(mx_i_1 ~ us_i_1 + mx_y)
waldtest(fit_i, fit_i_r)                  # fit_i is the U Model; fit_i_r is the R Model

## You can get the same answer by computing the F-test with the restricted and unrestricted RSS.
# Unrestricted Model RSS
e_i <- fit_i$residuals                    # Unrestricted residuals
RSS <- sum(e_i^2)                         # Unrestricted RSS
# Restricted Model RSS
e_i_r <- fit_i_r$residuals
RSS_r <- sum(e_i_r^2)                     # Restricted RSS
k <- length(fit_i$coefficients)           # k: number of coefficients in U Model
k2 <- length(fit_i_r$coefficients)        # k2: number of coefficients in R Model
# F-test
J <- k - k2                               # J: number of restrictions (= k - k2)
T <- length(e_i_r)
F_test <- ((RSS_r - RSS)/J)/(RSS/(T - k))
F_test
qf(.95, df1=J, df2=(T - k))
p_val <- 1 - pf(F_test, df1=J, df2=(T - k))
p_val

### Q3.g. Testing J hypotheses with R package car (J=2: beta_1 = 1 & beta_3 = 2)
library(car)
linearHypothesis(fit_i, c("us_i_1 = 1", "mx_y = 2"), test="F")   # F: exact test

### Q3.h. Wald test for missing US Inflation in model (using library lmtest, called above)
fit_i_I <- lm(mx_i_1 ~ us_i_1 + e_mx + mx_y + mx_ca + us_I)   # Now, U Model
waldtest(fit_i_I, fit_i)   # fit_i is the R Model
summary(fit_i_I)           # We could have done a t-test on us_I

## (EXTRA) Wald test for missing MX Inflation in model (using library lmtest)
fit_i_Im <- lm(mx_i_1 ~ us_i_1 + e_mx + mx_y + mx_ca + mx_I)  # Now, U Model
waldtest(fit_i_Im, fit_i)
summary(fit_i_Im)          # We could have done a t-test on mx_I

### Q3.i. Structural Change with Chow Test
# Compute Chow test with R package strucchange
library(strucchange)
x_years[68]                # Check Tequila effect date
x_break <- 67              # We lost one observation when taking log changes
sctest(mx_i_1 ~ us_i_1 + e_mx + mx_y + mx_ca, type = "Chow", point = x_break)
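## (EXTRA) A sketch of the Chow test computed by hand from the pooled and subsample RSS,
## using the same F-test logic as in Q3.f. Object names (fit_all, fit_1, fit_2, F_chow)
## are illustrative, and the first regime is assumed to end at observation x_break, as in
## sctest() above; the F-statistic should be the same as the sctest output.
fit_all <- lm(mx_i_1 ~ us_i_1 + e_mx + mx_y + mx_ca)                              # pooled (restricted) model
fit_1 <- lm(mx_i_1 ~ us_i_1 + e_mx + mx_y + mx_ca, subset = 1:x_break)            # regime 1
fit_2 <- lm(mx_i_1 ~ us_i_1 + e_mx + mx_y + mx_ca, subset = (x_break+1):T_new)    # regime 2
RSS_all <- sum(residuals(fit_all)^2)
RSS_12 <- sum(residuals(fit_1)^2) + sum(residuals(fit_2)^2)
k_chow <- length(coef(fit_all))                                                   # coefficients per regime
F_chow <- ((RSS_all - RSS_12)/k_chow)/(RSS_12/(T_new - 2*k_chow))
F_chow
1 - pf(F_chow, df1 = k_chow, df2 = T_new - 2*k_chow)                              # p-value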
### Q3.j. Expected Mexican interest rate according to Model
Ex_Mx_i <- b_i[1] + b_i[2]*mean(us_i_1) + b_i[3]*mean(e_mx) + b_i[4]*mean(mx_y) + b_i[5]*mean(mx_ca)
Ex_Mx_i
mean(mx_i)/100             # Observed Mexican interest rate in the period

# Compute Expected Mexican interest rate with fitted values
mx_i_hat <- fit_i$fitted
mean(mx_i_hat)

## (EXTRA) Plot fitted values vs actual values
plot(mx_i_hat, type="l", main="Fitted vs Actual Values", xlab="Time", ylab="MX interest rate", pch=19)
lines(mx_i_1, type="l", col="red")

###### Q4 ###### (NOT COVERED)
# NOTE: the dependent variable y is not defined above; it must be set before running this
# section (for example, y <- mx_i_1 would reuse the dependent variable from Q3).
fit_M1 <- lm(y ~ us_i_1 + e_mx + mx_I + mx_y)             # Model 1
y_hat1 <- fitted(fit_M1)
fit_J1 <- lm(y ~ mx_i_0 + us_I + y_hat1)                  # J-test: add Model 1's fitted values to Model 2
summary(fit_J1)

fit_M2 <- lm(y ~ mx_i_0 + us_I)                           # Model 2
y_hat2 <- fitted(fit_M2)
fit_J2 <- lm(y ~ us_i_1 + e_mx + mx_I + mx_y + y_hat2)    # J-test: add Model 2's fitted values to Model 1
summary(fit_J2)

fit_enc <- lm(y ~ us_i_1 + e_mx + mx_I + mx_y + mx_i_0 + us_I)   # Encompassing model
summary(fit_enc)

library(lmtest)
jtest(fit_M1, fit_M2)
encomptest(fit_M1, fit_M2)

###### Q5 ###### True or False
# a. A restricted model can have a lower RSS than an unrestricted model. // False
# b. Joint hypotheses and individual hypotheses do not have to reach the same conclusions. // True
# c. The Type I error can be greater than the Type II error. // True
# d. Perfect multicollinearity is not a problem for the unbiasedness of the OLS regression. // False
# e. The sampling distribution of the mean drives the sampling distribution of the t-tests and Wald tests in small (finite) samples. // True
# f. The J-test can be used to test restrictions of the model. // False (NOT COVERED)
# g. If the model is misspecified, OLS is no longer unbiased and consistent. // True (NOT COVERED)
# h. The Chow test can be used when three or more regimes are suspected in the data. // False
# i. If returns are not normally distributed, it is not possible to test the CAPM. // False