Purpose

This glossary provides a quick reference to key functionality in R for anyone working with macro/financial data. It is NOT a substitute for a textbook, nor for package documentation.

The glossary is organized into 4 chapters, motivated by the manner in which a quantitative analyst might approach the investing process.

Chapter 1: Explore — loading/cleaning financial time series data, EDA

Chapter 2: Explain — modeling with regressions and similar tools

Chapter 3: Forecast — general forecasting techniques with examples for returns

Chapter 4: Protect — portfolio optimization for diversification, value at risk, etc.

Each section of code is designed to stand alone, complete with references to data and required packages.

Please contact me if you find typos or if you have suggestions for new items to add to the glossary. | https://www.linkedin.com/in/mike-aguilar-econ/

A special thanks to Ziming Huang https://www.linkedin.com/in/ziming-huang/ for tremendous contributions to this document.

1 Explore

1.1 Housekeeping

Standard lines to put at the top of all R scripts

rm(list=ls()) # clear workspace
cat("\014")  # clear console
#graphics.off() # clear all plots

Standard lines to put at the top of all RMD files

knitr::opts_chunk$set(fig.width=10, fig.height=4,echo=TRUE, warning=FALSE, message=FALSE)
rm(list=ls()) # clear workspace
cat("\014")  # clear console
#graphics.off() # clear all plots

1.2 Loading & Saving Data

1.2.1 Load from Yahoo Finance API

1.2.1.1 Load over Fixed Time Span

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
startd = "2015-01-01"
endd = "2016-01-01"
Ticker = "AAPL"
getSymbols(Ticker,from=startd,to=endd,src='yahoo')
## [1] "AAPL"

1.2.1.2 Load Until Most Recent

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
startd = "2015-01-01"
Ticker = "AAPL"
getSymbols(Ticker,from=startd,src='yahoo')
## [1] "AAPL"

1.2.1.3 Load Multiple Tickers

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
startd = "2015-01-01"
Tickers = c("AAPL","IBM","GE")
getSymbols(Tickers,from=startd,src='yahoo')
## [1] "AAPL" "IBM"  "GE"

1.2.2 Batch Load Yahoo Finance

Load multiple symbols quickly.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
# Github (dev version)
# https://github.com/msperlin/yfR
#devtools::install_github('msperlin/yfR') #package installation (until CRAN available)
library(yfR)
library(ggplot2)

library(rvest) # crawl data from html
url <- "https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average"
DOWJIA <- url %>%
  xml2::read_html() %>%
  html_nodes(xpath='//*[@id="constituents"]') %>%
  html_table()
DOWJIA <- DOWJIA[[1]]
DOWJIA_Tickers <- DOWJIA$Symbol

my_tickers<-DOWJIA_Tickers
first_date <- '2000-01-01'
last_date <- '2022-01-01'

RawData <- yf_get(tickers = my_tickers, 
                  first_date = first_date, 
                  last_date = last_date,
                  type_return = "arit"
                  )
WideRawData<-yf_convert_to_wide(RawData)
Ret<-WideRawData$ret_adjusted_prices
Ret <- na.omit(Ret)
Ret
## # A tibble: 4,413 x 29
##    ref_date         MMM       AXP     AMGN      AAPL       BA       CAT      CVX
##    <date>         <dbl>     <dbl>    <dbl>     <dbl>    <dbl>     <dbl>    <dbl>
##  1 2004-06-24 -0.0108   -0.00837   0.00553 -0.0154   -0.00632  0.0155   -5.06e-3
##  2 2004-06-25  0.000903  0.00765  -0.00715  0.0157    0.0189  -0.0120   -1.07e-2
##  3 2004-06-28  0.000338 -0.00798  -0.0105  -0.0359   -0.0212  -0.00857  -6.10e-3
##  4 2004-06-29  0.0125    0.00648   0.00990  0.000308  0.00598  0.0173    6.68e-3
##  5 2004-06-30  0.00234   0.00410   0.00887  0.00123   0.0115   0.00787   7.17e-3
##  6 2004-07-01 -0.0204   -0.000973  0.00348 -0.00737  -0.0233  -0.0159   -6.80e-3
##  7 2004-07-02 -0.00760  -0.00175   0.00803 -0.0378   -0.00762 -0.0187   -2.35e-3
##  8 2004-07-06  0.000572 -0.00800  -0.0212  -0.00418  -0.00424  0.000131  1.50e-3
##  9 2004-07-07  0.0101    0.00354  -0.00259 -0.0181    0.00162  0.0100    5.35e-4
## 10 2004-07-08 -0.00780  -0.0145   -0.0106  -0.00823   0.0117  -0.0125   -9.63e-4
## # ... with 4,403 more rows, and 21 more variables: CSCO <dbl>, KO <dbl>,
## #   DIS <dbl>, GS <dbl>, HD <dbl>, HON <dbl>, IBM <dbl>, INTC <dbl>, JNJ <dbl>,
## #   JPM <dbl>, MCD <dbl>, MRK <dbl>, MSFT <dbl>, NKE <dbl>, PG <dbl>,
## #   CRM <dbl>, TRV <dbl>, UNH <dbl>, VZ <dbl>, WBA <dbl>, WMT <dbl>

1.2.3 Load Tickers from EXCEL

Task: Paste a list of tickers into R in order to conduct analysis.

Step 1: In a spreadsheet, list your tickers in a column and use the CONCAT (or TEXTJOIN) function to build a single comma-separated, quoted string such as "AAPL","IBM","GE". Copy the cell holding the result (F7 in the example).

Step 2: Within R, name your vector and paste the spreadsheet string inside c(), as in the sketch below.
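A minimal sketch of Step 2, assuming your spreadsheet produced the string "AAPL","IBM","GE":

Tickers <- c("AAPL","IBM","GE") # paste the CONCAT output between c( and )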

1.2.4 Load from quandl/NASDAQ

  1. Get an API key from quandl.com.
  2. Save that key in a csv file in the same directory as your code file, with a single column named Key and your API key in the first row.
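A sketch of what quandlkey.csv might contain (the key shown is a placeholder):

Key
your-quandl-api-key-here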

In this example we are loading the adjusted closing price, which is the 11th column of the dataframe. We are loading daily data.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
quandl_api_key<-read.csv("../data/quandlkey.csv",stringsAsFactors=FALSE)
Quandl.api_key(quandl_api_key$Key) # pass the key string itself, not the whole data frame
startd<-"2011-12-30"  
endd<-"2013-01-01"
freq<-"daily"
ticker<- c('WIKI/GOOGL.11','WIKI/IBM.11','WIKI/GE.11','WIKI/YHOO.11')
stock_prices = Quandl(ticker,start_date=startd, end_date=endd, collapse=freq,type="zoo")
names(stock_prices)=c('GOOG.Adjusted','IBM.Adjusted','GE.Adjusted','YHOO.Adjusted') 

1.2.5 Load from FRED

  1. Obtain an API key from FRED https://fred.stlouisfed.org/docs/api/api_key.html
  2. Save that key in a csv file in the same directory as your code file, with a single column named Key and your API key in the first row (matching fred_api_key$Key below).

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(fredr)
fred_api_key<-read.csv("../data/fredkey.csv",stringsAsFactors=FALSE)
fredr_set_key(fred_api_key$Key)
mydata<-fredr(
  series_id = "UNRATE",
  frequency = "m", # monthly
  observation_start = as.Date("1990-01-01"),
  observation_end = as.Date("2000-01-01"),
  units = "pc1" # % Delta 1yr ago
)

1.2.6 Loading from CSV

This assumes that you have a folder called “data” in the directory where your code file is located.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
mydata<-read.csv("../data/AAPL.csv")

1.2.7 Loading from TXT

This assumes that you have a folder called “data” in the directory where your code file is located.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
mydata<-read.table("../data/AAPL.txt", header = TRUE, sep=" ")

1.2.8 Load from XLS

This assumes that you have a folder called “data” in the directory where your code file is located.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(readxl)
mydata <- read_excel("../data/AAPL.xlsx") 

1.2.9 Save Data

1.2.9.1 Save to CSV

This assumes that you have a folder called “data” in the directory where your code file is located.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
mydata<-read.csv("../data/AAPL.csv")
write.csv(mydata, "../data/AAPL_data.csv",row.names = TRUE)

1.2.9.2 Save to XLS

This assumes that you have a folder called “data” in the directory where your code file is located.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(writexl)
library(readxl)
mydata <- read_excel("../data/AAPL.xlsx") 
write_xlsx(mydata, '../data/SaveAPPLData.xlsx')

1.2.10 Load SP500 Tickers

rm(list=ls()) # clear workspace
cat("\014")  # clear console
# prepare library
library(rvest) # crawl data from html
# fetch S&P ticker list from wiki
url <- "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
SP500 <- url %>%
  xml2::read_html() %>%
  html_nodes(xpath='//*[@id="mw-content-text"]/div/table[1]') %>%
  html_table()
SP500 <- SP500[[1]]
SP500_Tickers <- SP500$Symbol

1.2.11 Load DJIA Tickers

rm(list=ls()) # clear workspace
cat("\014")  # clear console
# prepare library
library(rvest) # crawl data from html
url <- "https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average"
DOWJIA <- url %>%
  xml2::read_html() %>%
  html_nodes(xpath='//*[@id="constituents"]') %>%
  html_table()
DOWJIA <- DOWJIA[[1]]
DOWJIA_Tickers <- DOWJIA$Symbol

1.2.12 Load Fama-French 3 Factors

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(tidyverse)
library(xts)
# Load factor data from Ken French website via FTP
# create temp_file to store the file
temp <- tempfile()
# download the file
filename = c('F-F_Research_Data_Factors_CSV.zip')
path = c('https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/')
todownload = paste0(path,filename)
download.file(todownload, temp)
# unzip the file, to extract the data
#ff_factors_raw_data <- unzip(temp, exdir = here("data"))
ff_factors_raw_data <- unzip(temp)
# read the contents with read_csv and extract the desired dates
Factors <- read_csv(ff_factors_raw_data, skip = 3) %>%
  rename("Date" = "...1") %>%
  mutate_all(as.numeric) %>%
  filter(Date > 196301) %>%
  na.omit()

# Format the Factor data frame
FFdate<-as.Date(paste0(as.character(Factors$Date), '01'), format='%Y%m%d')
FFdata<-log(select(Factors, -Date)/100+1) #recall: r=ln(1+R)
FFxts<-xts(FFdata,order.by=FFdate)
# print the first several rows
head(FFxts)
##                  Mkt-RF          SMB          HML          RF
## 1963-02-01 -0.024087796  0.004589452  0.021761492 0.002297359
## 1963-03-01  0.030335200 -0.025112701  0.018821754 0.002297359
## 1963-04-01  0.044112575 -0.013591954  0.009950331 0.002496880
## 1963-05-01  0.017446914  0.010643160  0.025765208 0.002397125
## 1963-06-01 -0.020202707 -0.002904213  0.007670506 0.002297359
## 1963-07-01 -0.003907625 -0.005314095 -0.008939842 0.002696362

1.2.13 Load Fama-French 5 Factors

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(tidyverse)
library(xts)
# Load factor data from Ken French website via FTP
# create temp_file to store the file
temp <- tempfile()
# download the file
filename = c('F-F_Research_Data_5_Factors_2x3_CSV.zip')
path = c('https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/')
todownload = paste0(path,filename)
download.file(todownload, temp)
# unzip the file, to extract the data
#ff_factors_raw_data <- unzip(temp, exdir = here("data"))
ff_factors_raw_data <- unzip(temp)
# read the contents with read_csv and extract the desired dates
Factors <- read_csv(ff_factors_raw_data, skip = 3) %>%
  rename("Date" = "...1") %>%
  mutate_all(as.numeric) %>%
  filter(Date > 196301) %>%
  na.omit()

# Format the Factor data frame
FFdate<-as.Date(paste0(as.character(Factors$Date), '01'), format='%Y%m%d')
FFdata<-log(select(Factors, -Date)/100+1) #recall: r=ln(1+R)
FFxts<-xts(FFdata,order.by=FFdate)
# print the first several rows
head(FFxts)
##                  Mkt-RF          SMB           HML          RMW          CMA
## 1963-07-01 -0.003907625 -0.004409708 -0.0089398416  0.006776984 -0.012376271
## 1963-08-01  0.049456609 -0.007528266  0.0166604409  0.003593536 -0.003405793
## 1963-09-01 -0.015824550 -0.005515181  0.0007996802 -0.007125325  0.002895803
## 1963-10-01  0.024985253 -0.013794711 -0.0014009809  0.027615167 -0.020406810
## 1963-11-01 -0.008536331 -0.008939842  0.0179381451 -0.005113049  0.022837234
## 1963-12-01  0.018134570 -0.020917248 -0.0008003202  0.000299955 -0.000400080
##                     RF
## 1963-07-01 0.002696362
## 1963-08-01 0.002496880
## 1963-09-01 0.002696362
## 1963-10-01 0.002895803
## 1963-11-01 0.002696362
## 1963-12-01 0.002895803

1.2.14 Load French 49 Industries

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(tidyverse)
library(xts)
# Load factor data from Ken French website via FTP
# create temp_file to store the file
temp <- tempfile()
# download the file
filename = c('49_Industry_Portfolios_CSV.zip')
path = c('https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/')
todownload = paste0(path,filename)
download.file(todownload, temp)
# unzip the file, to extract the data
ff_industries_raw_data <- unzip(temp)
# read the contents with read_csv and extract the desired dates
Industries <- read_csv(ff_industries_raw_data, skip = 11) %>%
  rename("Date" = "...1") %>%
  mutate_all(as.numeric) %>%
  filter(Date > 196301) %>%
  na.omit()

# Format the Factor data frame
FFdate<-as.Date(paste0(as.character(Industries$Date), '01'), format='%Y%m%d')
FFdata<-log(select(Industries, -Date)/100+1) #recall: r=ln(1+R)
FFxts<-xts(FFdata,order.by=FFdate)
# print the first several rows
head(FFxts)
##                  Agric         Food     Soda          Beer       Smoke
## 1963-02-01 -0.06006853 -0.026652031 -9.21034 -0.0518197487 -0.03417745
## 1963-02-01 -0.05752319 -0.004811557 -9.21034 -0.0293258294 -0.02829660
## 1963-02-01  0.01980263  0.482426149  0.00000  0.1043600153  0.11332869
## 1963-02-01  1.22829462  1.210392156 -9.21034  0.8182809113  1.38404183
## 1963-03-01 -0.01969263  0.018330957 -9.21034  0.0450689634  0.07455073
## 1963-03-01 -0.01979463  0.014790085 -9.21034  0.0009995003  0.04258045
##                   Toys         Fun        Books       Hshld       Clths
## 1963-02-01 -0.10236501 -0.03676772 -0.020815140 -0.03303985 -0.03438443
## 1963-02-01 -0.09453048 -0.02326863 -0.033143219 -0.03624913 -0.02613866
## 1963-02-01  0.04879016  0.11332869  0.095310180  0.27002714  0.19062036
## 1963-02-01  0.97009687  0.52579305  0.517125736  1.05997541  0.39971528
## 1963-03-01 -0.01867327  0.03758476 -0.003506139  0.03922071  0.01419872
## 1963-03-01  0.05458288  0.03796993 -0.001200721  0.02078254  0.01163208
##                Hlth         MedEq       Drugs       Chems        Rubbr
## 1963-02-01 -9.21034  0.0153811020 -0.02901695 -0.03926071 -0.006923915
## 1963-02-01 -9.21034 -0.0107576567 -0.02357574 -0.01755316 -0.015824550
## 1963-02-01  0.00000  0.0392207132  0.17395331  0.37843644  0.067658648
## 1963-02-01 -9.21034  0.8751770281  1.66013103  1.95996802  0.237835100
## 1963-03-01 -9.21034  0.0006997551  0.01252128  0.03584965  0.036042591
## 1963-03-01 -9.21034 -0.0058168853  0.01646373  0.03662118  0.022739487
##                   Txtls        BldMt        Cnstr        Steel    FabPr
## 1963-02-01  0.006478966 -0.006018072 -0.027371197 -0.021530119 -9.21034
## 1963-02-01 -0.008435479 -0.008334637 -0.026036015 -0.003907625 -9.21034
## 1963-02-01  0.215111380  0.536493371  0.076961041  0.553885113  0.00000
## 1963-02-01  0.405465108  0.838502148  0.501441732  1.121221432 -9.21034
## 1963-03-01  0.008265744  0.035367144  0.005286004  0.022152805 -9.21034
## 1963-03-01  0.014198719  0.030626194 -0.003907625  0.020390690 -9.21034
##                   Mach       ElcEq        Autos        Aero        Ships
## 1963-02-01 -0.02634398 -0.06571254 -0.029737818 -0.01765494 0.0129162253
## 1963-02-01 -0.01714616 -0.04040541 -0.004811557 -0.01359195 0.0005998201
## 1963-02-01  0.55388511  0.42526774  0.350656872  0.22314355 0.1397619424
## 1963-02-01  0.71798612  1.35472629  2.081202490  0.89824895 0.6159421097
## 1963-03-01  0.02858746  0.01380428  0.065974989 -0.01551981 0.0497420919
## 1963-03-01  0.03556018 -0.01694272  0.025570276 -0.01806214 0.0412379080
##                Guns     Gold        Mines         Coal         Oil         Util
## 1963-02-01 -9.21034 -9.21034 -0.017756722 -0.004409708 -0.01338924 -0.014910613
## 1963-02-01 -9.21034 -9.21034 -0.021019367 -0.018163971 -0.01836766 -0.006521217
## 1963-02-01  0.00000  0.00000  0.157003749  0.076961041  0.35767444  0.708035793
## 1963-02-01 -9.21034 -9.21034  0.530157552  0.814568036  2.51850260  1.652382453
## 1963-03-01 -9.21034 -9.21034  0.026934001  0.060153923  0.06756519  0.017741681
## 1963-03-01 -9.21034 -9.21034 -0.001701447  0.028976108  0.05675833  0.016660441
##                  Telcm      PerSv       BusSv       Hardw    Softw        Chips
## 1963-02-01 -0.01095984 0.01340969 -0.02234786 -0.06102470 -9.21034  0.040565962
## 1963-02-01 -0.02112150 0.03372487 -0.01836766 -0.07774535 -9.21034 -0.031593872
## 1963-02-01  0.11332869 0.02955880  0.07696104  0.12221763  0.00000  0.113328685
## 1963-02-01  3.43357744 0.56480231  0.24865534  2.47002987 -9.21034  0.606771965
## 1963-03-01  0.01921422 0.04506896 -0.02367813  0.04678816 -9.21034  0.006478966
## 1963-03-01  0.04152575 0.02303270 -0.02316628  0.01262003 -9.21034  0.005783245
##                  LabEq      Paper        Boxes       Trans         Whlsl
## 1963-02-01 -0.01156664 0.03777736 -0.023780529 0.002995509 -0.0188770558
## 1963-02-01 -0.02665203 0.06475725 -0.009242581 0.017545179  0.0006997551
## 1963-02-01  0.07696104 0.03922071  0.254642218 0.530628251  0.1655144385
## 1963-02-01  2.11473629 0.55933003  1.516664512 0.805090979  0.5374870287
## 1963-03-01  0.01715206 0.06034223  0.028684634 0.032563991  0.0059820717
## 1963-03-01 -0.01308524 0.02917038  0.033724869 0.034691240  0.0078689584
##                   Rtail       Meals        Banks        Insur        RlEst
## 1963-02-01 -0.005515181 -0.03811732  0.004788517 -0.037598030 -0.056464537
## 1963-02-01 -0.025830760 -0.03874083 -0.010454458 -0.037598030 -0.059537713
## 1963-02-01  0.536493371  0.08617770  0.207014169  0.009950331  0.019802627
## 1963-02-01  1.147973718  0.29877037  1.031831255  2.190166394  0.310274921
## 1963-03-01  0.021467907 -0.05890111  0.019410394  0.020586634  0.003394233
## 1963-03-01  0.008265744 -0.03056231  0.049742092  0.020586634  0.000299955
##                     Fin       Other
## 1963-02-01 -0.003606496 0.006578315
## 1963-02-01 -0.025215249 0.012323750
## 1963-02-01  0.165514438 0.048790164
## 1963-02-01  0.924377942 0.699924165
## 1963-03-01  0.029073247 0.057136191
## 1963-03-01  0.018527305 0.002397125
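Caution: the downloaded CSV stacks several panels (average value-weighted returns, average equal-weighted returns, number of firms, and so on), and missing values are coded -99.99, which is why the output above shows repeated dates and values of -9.21034 (= log(1 - 99.99/100)). A minimal guard, assuming the -99.99 convention documented on the French website, is to recode those cells as NA before taking logs (you could also cap read_csv with n_max at the end of the first panel):

Industries[Industries == -99.99] <- NA # recode French's -99.99 missing-value code as NA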

1.2.15 Load US Recession Dates

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
getSymbols("USREC",src="FRED")
## [1] "USREC"

1.3 Plotting Prices

1.3.1 Price Chart

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
chart_Series(x=MSFT$MSFT.Open)

1.3.2 Candlestick Chart

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
chart_Series(x=MSFT['2020-01/2021-01'])

1.3.3 Price Chart w/ Volume & Bands

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
chartSeries(x=MSFT['2020-01/2021-01'],TA=c(addVo(),addBBands()))

1.3.4 Price Chart with Multiple Tickers

You could probably use the Quandl API to make this easier.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
# pull data from yahoo/FRED 
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
colnames(Prices)<-Tickers
plot(Prices,legend.loc = 'topleft')

1.3.5 Indexed Price Chart

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = c('MSFT','IBM') # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT" "IBM"
Returns = do.call(merge,lapply(ticker, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-ticker
StartingEquity=1
GrossReturns <- Returns+1
GrossReturns[1,]<-StartingEquity 
GrossReturns<-cumprod(GrossReturns)
head(GrossReturns)
##                MSFT       IBM
## 2017-01-04 1.000000 1.0000000
## 2017-01-05 1.000000 0.9966915
## 2017-01-06 1.008668 1.0015953
## 2017-01-09 1.005457 0.9904880
## 2017-01-10 1.005137 0.9779040
## 2017-01-11 1.014286 0.9910789
chart.TimeSeries(GrossReturns,legend.loc = 'topleft')

1.3.6 Multi-Asset Indexed Price Chart

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
Prices = as.xts(data.frame(F=F$F.Adjusted,GE=GE$GE.Adjusted, CAT=CAT$CAT.Adjusted))
colnames(Prices)<-Tickers
Assets_begPrice=as.numeric(head(Prices,n=1))
Assets_GrossCumRet<- Prices/rep((Assets_begPrice), each = nrow(Prices))
Assets_Index<-100*Assets_GrossCumRet
plot(Assets_Index,legend.loc = 'topleft')

1.3.7 Price Chart with Drawdowns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
Returns = do.call(merge,lapply(ticker, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-ticker
charts.PerformanceSummary(Returns)

1.3.8 Recession Shading

Reference: https://rpubs.com/FSl/609471

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(fredr)
fred_api_key<-read.csv("../data/fredkey.csv",stringsAsFactors=FALSE)
fredr_set_key(fred_api_key$Key)
fred_data<-fredr(series_id = "WILL5000INDFC",observation_start = as.Date("1971-01-01"))
theme_am<-
function (base_size = 12, base_family = "") 
{
  library(ggthemes)
  library(scales)
  library(extrafont)
  theme_hc(base_size = base_size, base_family = base_family) %+replace% 
    theme(
      axis.text.x = element_text(color = "grey20", size = 11,family="Calibri Light"),
      axis.text.y = element_text(color = "grey20", size = 11,family="Calibri Light"),
      axis.title.x = element_text(color = "grey20", size = 12,family="Calibri Light"),
      axis.title.y = element_text(color = "grey20", size = 12,family="Calibri Light"),
      plot.title = element_text(color="#04103b", size=13, face="bold",family="Calibri Light"),
      legend.text = element_text(color = "grey20", size = 12,family="Calibri Light")
    )
}
add_rec_shade<-function(st_date,ed_date,shade_color="darkgray")
{
  library(fredr)
  library(ecm)
  library(ggplot2)
  #fredr_set_key("insert_your_api_key")
  
  #st_date<-as.Date("2001-06-01")
  #ed_date<-as.Date(Sys.Date())
  
  recession<-fredr(series_id = "USRECD",observation_start = as.Date(st_date),observation_end = as.Date(ed_date))
  
  recession$diff<-recession$value-lagpad(recession$value,k=1)
  recession<-recession[!is.na(recession$diff),]
  recession.start<-recession[recession$diff==1,]$date
  recession.end<-recession[recession$diff==(-1),]$date
  
  if(length(recession.start)>length(recession.end))
  {recession.end<-c(recession.end,Sys.Date())}
  if(length(recession.end)>length(recession.start))
  {recession.start<-c(min(recession$date),recession.start)}
  
  recs<-as.data.frame(cbind(recession.start,recession.end))
  recs$recession.start<-as.Date(as.numeric(recs$recession.start),origin=as.Date("1970-01-01"))
  recs$recession.end<-as.Date(recs$recession.end,origin=as.Date("1970-01-01"))
  if(nrow(recs)>0)
  {
    rec_shade<-geom_rect(data=recs, inherit.aes=F, 
                         aes(xmin=recession.start, xmax=recession.end, ymin=-Inf, ymax=+Inf), 
                         fill=shade_color, alpha=0.5)
    return(rec_shade)
  }
}
library(extrafont)
## Registering fonts with R
library(ggplot2)

my_plot<-
  ggplot(fred_data, aes(x=date)) +
  #Add recession shading here
  #******************************************************************
  add_rec_shade(min(fred_data$date),max(fred_data$date))+
  #******************************************************************
  geom_line(aes(y=value),size = 0.8,color="#dd0400") +
  scale_y_continuous(name="Wilshire 5000 Index") +
  theme_am()+
  scale_x_date(labels = date_format("%m-%Y"))+
  theme(plot.title = element_text(color="#04103b", size=13, face="bold",family="Calibri Light"))+
  labs(title="Wilshire 5000 and NBER Recessions",x ="")

my_plot

1.4 Computing Returns

1.4.1 Compute Lag

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(tidyquant)
ticker = 'AAPL' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-02-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date, to = end_date, src=data_src)
## [1] "AAPL"
LagAdjClose = lag.xts(AAPL$AAPL.Adjusted,k=1)
head(LagAdjClose)
##            AAPL.Adjusted
## 2017-01-03            NA
## 2017-01-04      27.25764
## 2017-01-05      27.22714
## 2017-01-06      27.36559
## 2017-01-09      27.67067
## 2017-01-10      27.92413

1.4.2 Compute Many Lags

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(tidyquant)
ticker = 'AAPL' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-02-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date, to = end_date, src=data_src)
## [1] "AAPL"
LagAdjClose = lag.xts(AAPL$AAPL.Adjusted,k=1:2)
head(LagAdjClose)
##            AAPL.Adjusted AAPL.Adjusted.1
## 2017-01-03            NA              NA
## 2017-01-04      27.25764              NA
## 2017-01-05      27.22714        27.25764
## 2017-01-06      27.36559        27.22714
## 2017-01-09      27.67067        27.36559
## 2017-01-10      27.92412        27.67067

1.4.3 Simple Daily Returns (v1)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(tidyquant)
ticker = 'AAPL' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-02-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date, to = end_date, src=data_src)
## [1] "AAPL"
Return_AAPL = AAPL$AAPL.Adjusted/lag.xts(AAPL$AAPL.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
Return_AAPL = na.omit(Return_AAPL) #remove NA value

1.4.4 Simple Daily Returns (v2)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(tidyquant)
ticker = 'AAPL' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-02-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date, to = end_date, src=data_src)
## [1] "AAPL"
price_diff = diff(x = AAPL$AAPL.Adjusted,differences = 1) # calculate one period difference
price_diff = na.omit(price_diff) #remove NA value
dim(price_diff) # Look at the size of price_diff
## [1] 1025    1
price_t = AAPL$AAPL.Adjusted[-length(AAPL$AAPL.Adjusted)] # remove the last observation
## calculate return via R_t = (P_t-P_{t-1})/P_{t-1}
Return_AAPL2 = price_diff/as.numeric(price_t)

1.4.5 Simple Weekly Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
Returns = do.call(merge,lapply(ticker, function(x) 
        periodReturn(Ad(get(x)),period='weekly',type='arithmetic')))
head(Returns)
##            weekly.returns
## 2017-01-06   0.0041546561
## 2017-01-13  -0.0022278401
## 2017-01-20   0.0006379347
## 2017-01-27   0.0484540182
## 2017-02-03  -0.0319247222
## 2017-02-10   0.0050250230

1.4.6 Simple Rolling Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
Prices = do.call(merge,lapply(ticker, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
FiveDayRet = Prices$MSFT.Adjusted/lag.xts(Prices$MSFT.Adjusted,k=5) - 1
head(FiveDayRet,n=10)
##            MSFT.Adjusted
## 2017-01-04            NA
## 2017-01-05            NA
## 2017-01-06            NA
## 2017-01-09            NA
## 2017-01-10            NA
## 2017-01-11   0.014285696
## 2017-01-12   0.004975897
## 2017-01-13  -0.002227703
## 2017-01-17  -0.001756091
## 2017-01-18  -0.001916456

1.4.7 Simple Total Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
Prices = do.call(merge,lapply(ticker, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
TotalReturn = as.numeric(tail(Prices,n=1))/as.numeric(head(Prices,n=1))-1 # total return = P_T/P_0 - 1
TotalReturn
## [1] 2.98754

1.4.8 Log Daily Returns (v1)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
Returns = periodReturn(MSFT$MSFT.Adjusted,period = 'daily',type = 'log')
head(Returns)
##            daily.returns
## 2017-01-03  0.0000000000
## 2017-01-04 -0.0044844212
## 2017-01-05  0.0000000000
## 2017-01-06  0.0086304016
## 2017-01-09 -0.0031877975
## 2017-01-10 -0.0003192809

1.4.9 Log Daily Returns (v2)

My preferred approach since I can “see” what is happening.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
Returns = log(MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1))
head(Returns)
##            MSFT.Adjusted
## 2017-01-03            NA
## 2017-01-04 -0.0044841795
## 2017-01-05  0.0000000000
## 2017-01-06  0.0086302805
## 2017-01-09 -0.0031877286
## 2017-01-10 -0.0003193497

1.4.10 Currency Adjust. Ret. (v1)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(tidyquant)
ticker = 'AAPL' # asset ticker
start_date = '2014-09-25' # data start date
end_date = '2015-02-01' # data end date
data_src = 'yahoo' # data source
freq = "daily" # data frequency
getSymbols(ticker,from = start_date,
           to = end_date, src=data_src, periodicity = freq)
## [1] "AAPL"
ticker = 'DEXUSEU' # ticker for "U.S. / Euro Foreign Exchange Rate", can be found from FRED website
data_src = 'FRED' # data source
getSymbols(ticker, src=data_src)
## [1] "DEXUSEU"
DEXUSEU = na.approx(DEXUSEU) # linearly interpolate missing values
### merge exchange rate and AAPL price
dat=merge(AAPL$AAPL.Adjusted, DEXUSEU, join='left')
### calculate adjusted price: convert the USD price to a EUR price (the original price is USD-based)
dat$Price_EUR = dat$AAPL.Adjusted/dat$DEXUSEU
head(dat)
##            AAPL.Adjusted DEXUSEU Price_EUR
## 2014-09-25      21.99609  1.2748  17.25454
## 2014-09-26      22.64337  1.2686  17.84910
## 2014-09-29      22.49952  1.2703  17.71198
## 2014-09-30      22.64337  1.2628  17.93108
## 2014-10-01      22.29051  1.2618  17.66564
## 2014-10-02      22.45233  1.2670  17.72086
### calculate USD based return 
price_t = dat$AAPL.Adjusted[-length(dat$AAPL.Adjusted)] # remove the last observation
price_diff = diff(x = dat$AAPL.Adjusted,differences = 1) # calculate one period difference
price_diff = na.omit(price_diff) # remove NA value
Return_AAPL_USD = price_diff/as.numeric(price_t)
names(Return_AAPL_USD)="USDReturn"

### calculate exchange rate return
FX_t = dat$DEXUSEU[-length(dat$DEXUSEU)] # remove the last observation
FX_diff = diff(x = dat$DEXUSEU,differences = 1) # calculate one period difference
FX_diff = na.omit(FX_diff) # remove NA value
Return_FX = FX_diff/as.numeric(FX_t)
names(Return_FX)="ReturnFX"

### calculate Euro Based Investor's return
# method 1: (1+R_t(USD)) = (1+R_t(FX))*(1+R_t(EUR)), so R_t(EUR) = (1+R_t(USD))/(1+R_t(FX)) - 1
Return_AAPL_EUR = (1+Return_AAPL_USD)/(1+Return_FX)-1
head(Return_AAPL_EUR)
##               USDReturn
## 2014-09-26  0.034457961
## 2014-09-29 -0.007682222
## 2014-09-30  0.012370221
## 2014-10-01 -0.014803027
## 2014-10-02  0.003125614
## 2014-10-03  0.009386244

1.4.11 Currency Adjust. Ret. (v2)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(tidyquant)
ticker = 'AAPL' # asset ticker
start_date = '2014-09-25' # data start date
end_date = '2015-02-01' # data end date
data_src = 'yahoo' # data source
freq = "daily" # data frequency
getSymbols(ticker,from = start_date,
           to = end_date, src=data_src, periodicity = freq)
## [1] "AAPL"
ticker = 'DEXUSEU' # ticker for "U.S. / Euro Foreign Exchange Rate", can be found from FRED website
data_src = 'FRED' # data source
getSymbols(ticker, src=data_src)
## [1] "DEXUSEU"
DEXUSEU = na.approx(DEXUSEU) # linearly interpolate missing values
### merge exchange rate and AAPL price
dat=merge(AAPL$AAPL.Adjusted, DEXUSEU, join='left')
### calculate adjusted price: convert the USD price to a EUR price (the original price is USD-based)
dat$Price_EUR = dat$AAPL.Adjusted/dat$DEXUSEU
head(dat)
##            AAPL.Adjusted DEXUSEU Price_EUR
## 2014-09-25      21.99609  1.2748  17.25454
## 2014-09-26      22.64336  1.2686  17.84910
## 2014-09-29      22.49952  1.2703  17.71198
## 2014-09-30      22.64336  1.2628  17.93108
## 2014-10-01      22.29051  1.2618  17.66564
## 2014-10-02      22.45232  1.2670  17.72086
price_tEUR = dat$Price_EUR[-length(dat$Price_EUR)] # remove the last observation
price_diffEUR = diff(x = dat$Price_EUR,differences = 1) # calculate one period difference
price_diffEUR = na.omit(price_diffEUR) # remove NA value
Return_AAPL_EUR_method2 = price_diffEUR/as.numeric(price_tEUR)
head(Return_AAPL_EUR_method2)
##               Price_EUR
## 2014-09-26  0.034457775
## 2014-09-29 -0.007682222
## 2014-09-30  0.012370221
## 2014-10-01 -0.014803028
## 2014-10-02  0.003125436
## 2014-10-03  0.009386559
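Note: up to small rounding in the stored prices, this matches method 1 in the previous section; compare the two printed heads (0.034457775 here vs. 0.034457961 there on 2014-09-26).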

1.4.12 Inflation Adjust. Ret. (v1)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)

ticker = 'AAPL' # asset ticker
start_date = '2005-01-01' # data start date
end_date = '2021-02-01' # data end date
data_src = 'yahoo' # data source
freq = "monthly" # data frequency
# load data using `getSymbols`
getSymbols(ticker,from = start_date,
           to = end_date, src=data_src, periodicity = freq)
## [1] "AAPL"
### load CPI data from FRED
# set parameters
ticker = 'CPIAUCSL' # ticker for "Consumer Price Index for All Urban Consumers: All Items", can be found from FRED website
data_src = 'FRED' # data source
# load data using `getSymbols`
getSymbols(ticker, src=data_src)
## [1] "CPIAUCSL"
### merge CPI data and AAPL price
dat=merge(AAPL$AAPL.Adjusted, CPIAUCSL, join='left')
### calculate adjusted price
# calculate deflator (with deflator on 2017-01 equal to one)
dat$Deflator = dat$CPIAUCSL/as.numeric(dat$CPIAUCSL['2017-01',])
# calculate adjusted price
dat$RealPrice = dat$AAPL.Adjusted/dat$Deflator
head(dat)
##            AAPL.Adjusted CPIAUCSL  Deflator RealPrice
## 2005-01-01      1.174123    191.6 0.7864772  1.492889
## 2005-02-01      1.369861    192.4 0.7897610  1.734526
## 2005-03-01      1.272449    193.1 0.7926344  1.605342
## 2005-04-01      1.101141    193.7 0.7950972  1.384914
## 2005-05-01      1.214125    193.6 0.7946868  1.527803
## 2005-06-01      1.124043    193.7 0.7950972  1.413718
### method 1: calculate real returns using real price
price_t_real = dat$RealPrice[-length(dat$RealPrice)] # remove the last observation
price_diff_real = diff(x = dat$RealPrice,differences = 1) # calculate one period difference
price_diff_real = na.omit(price_diff_real) #remove NA value
realReturn_AAPL= price_diff_real/as.numeric(price_t_real)
names(realReturn_AAPL)="RealReturn_method1"
# print the first several rows
head(realReturn_AAPL)
##            RealReturn_method1
## 2005-02-01         0.16185878
## 2005-03-01        -0.07447815
## 2005-04-01        -0.13730913
## 2005-05-01         0.10317582
## 2005-06-01        -0.07467295
## 2005-07-01         0.15151847

1.4.13 Inflation Adjust. Ret. (v2)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)

ticker = 'AAPL' # asset ticker
start_date = '2005-01-01' # data start date
end_date = '2021-02-01' # data end date
data_src = 'yahoo' # data source
freq = "monthly" # data frequency
# load data using `getSymbols`
getSymbols(ticker,from = start_date,
           to = end_date, src=data_src, periodicity = freq)
## [1] "AAPL"
### load CPI data from FRED
# set parameters
ticker = 'CPIAUCSL' # ticker for "Consumer Price Index for All Urban Consumers: All Items", can be found from FRED website
data_src = 'FRED' # data source
# load data using `getSymbols`
getSymbols(ticker, src=data_src)
## [1] "CPIAUCSL"
### merge CPI data and AAPL price
dat=merge(AAPL$AAPL.Adjusted, CPIAUCSL, join='left')
### method 2: calculate real returns by adjusting nominal returns
# calculate nominal returns
price_t = dat$AAPL.Adjusted[-length(dat$AAPL.Adjusted)] # remove the last observation
price_diff = diff(x = dat$AAPL.Adjusted,differences = 1) # calculate one period difference
price_diff = na.omit(price_diff) # remove NA value
Return_AAPL = price_diff/as.numeric(price_t)
names(Return_AAPL)="nominalReturn"

# calculate monthly inflation
CPI_diff = diff(x=dat$CPIAUCSL,differences =1) # calculate one period difference
CPI_diff = na.omit(CPI_diff) # remove NA
monthly_inflation = CPI_diff/as.numeric(dat$CPIAUCSL[-length(dat$CPIAUCSL)])
names(monthly_inflation) = "monthly_inflation"

# adjust nominal returns for inflation: (1+R_nominal) = (1+R_real)*(1+monthly inflation rate)
realReturn_AAPL_method2 = (1+Return_AAPL)/(1+monthly_inflation)-1
# print the first several rows
head(realReturn_AAPL_method2)
##            nominalReturn
## 2005-02-01     0.16185977
## 2005-03-01    -0.07447742
## 2005-04-01    -0.13730981
## 2005-05-01     0.10317673
## 2005-06-01    -0.07467289
## 2005-07-01     0.15151833

1.4.14 Annualize Simple Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)

ticker = 'AAPL' # asset ticker
start_date = '2005-01-01' # data start date
end_date = '2021-02-01' # data end date
data_src = 'yahoo' # data source
freq = "daily" # data frequency
# load data using `getSymbols`
getSymbols(ticker,from = start_date,
           to = end_date, src=data_src, periodicity = freq)
## [1] "AAPL"
Daily_AAPL = periodReturn(AAPL$AAPL.Adjusted,period = "daily",type = "arithmetic")

Annualized_AAPL<-(1+Daily_AAPL)^(250/1)-1 # compound: (1+R)^(250/holding days) - 1
todisplay<-cbind(Daily_AAPL,Annualized_AAPL)
head(todisplay)
##            daily.returns daily.returns.1
## 2005-01-03  0.0000000000    0.000000e+00
## 2005-01-04  0.0102708929    1.186649e+01
## 2005-01-05  0.0087580205    7.846066e+00
## 2005-01-06  0.0007747782    2.136345e-01
## 2005-01-07  0.0728123373    4.275036e+07
## 2005-01-10 -0.0041879390   -6.497763e-01

Note: a number like 1.186649e+01 reads as 11.86649, and since 0.01 is 1%, that is about 1,187%. So the 2005-01-04 daily return of roughly 1.03%, compounded over 250 trading days, annualizes to about 1,187%.

1.4.15 Annualize Log Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
Daily_MSFT = log(MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1))
Annualized_MSFT = Daily_MSFT*250
todisplay<-cbind(Daily_MSFT,Annualized_MSFT)
head(todisplay)
##            MSFT.Adjusted MSFT.Adjusted.1
## 2017-01-03            NA              NA
## 2017-01-04 -0.0044843698     -1.12109245
## 2017-01-05  0.0000000000      0.00000000
## 2017-01-06  0.0086304022      2.15760055
## 2017-01-09 -0.0031877977     -0.79694942
## 2017-01-10 -0.0003190916     -0.07977289

Note: since 0.01 is 1%, an annualized log return like -1.12109245 (from the 2017-01-04 daily log return of about -0.45%) reads as roughly -112%.

1.4.16 Convert Daily to Weekly

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
MSFT<-MSFT$MSFT.Adjusted
colnames(MSFT)<-ticker
WeeklyPrices<-to.weekly(MSFT)
head(WeeklyPrices)
##            MSFT.Open MSFT.High MSFT.Low MSFT.Close
## 2017-01-06  58.06545  58.30669 57.80566   58.30669
## 2017-01-13  58.12112  58.63144 58.09329   58.17680
## 2017-01-20  58.01906  58.21391 57.80566   58.21391
## 2017-01-27  58.41805  61.03460 58.41805   61.03460
## 2017-02-03  60.43150  60.43150 58.61288   59.08610
## 2017-02-10  59.04898  59.43868 58.77063   59.38302
head(MSFT,n=10)
##                MSFT
## 2017-01-03 58.06545
## 2017-01-04 57.80566
## 2017-01-05 57.80566
## 2017-01-06 58.30669
## 2017-01-09 58.12112
## 2017-01-10 58.10257
## 2017-01-11 58.63144
## 2017-01-12 58.09329
## 2017-01-13 58.17680
## 2017-01-17 58.01906

CAUTION: The suffixes .Open, .High, .Low, .Close are NOT the respective fields of the OHLCV dataset pulled from Yahoo; they are computed from the single adjusted-price series. For instance, .Open is the adjusted price on the first day of the week, whereas .High is the highest adjusted daily price during the week. A quick check against the daily prices printed above appears below.
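For the week ending 2017-01-06 (first(), max() and "/" date-range subsetting ship with xts):

MSFT["2017-01-03/2017-01-06"]        # the daily adjusted prices in that week
first(MSFT["2017-01-03/2017-01-06"]) # 58.06545 = that week's MSFT.Open
max(MSFT["2017-01-03/2017-01-06"])   # 58.30669 = that week's MSFT.High (and .Close)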

1.4.17 Fill in Missing Dates

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
# Suppose observations were missing
MSFT$MSFT.Adjusted[c(3:5),]=NA
head(MSFT$MSFT.Adjusted)
##            MSFT.Adjusted
## 2017-01-03      58.06546
## 2017-01-04      57.80566
## 2017-01-05            NA
## 2017-01-06            NA
## 2017-01-09            NA
## 2017-01-10      58.10257
#Fill in with na.approx
FILLED<-na.approx(MSFT$MSFT.Adjusted)
head(FILLED)
##            MSFT.Adjusted
## 2017-01-03      58.06546
## 2017-01-04      57.80566
## 2017-01-05      57.85514
## 2017-01-06      57.90463
## 2017-01-09      58.05309
## 2017-01-10      58.10257
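Other fill choices ship with zoo/xts; for example, a minimal sketch using last-observation-carried-forward:

FILLED_locf <- na.locf(MSFT$MSFT.Adjusted) # repeat the last observed price through the gap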

1.4.18 Returns for Multiple Assets

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers

1.4.19 Multi-Asset Total Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
Prices = as.xts(data.frame(F=F$F.Adjusted,GE=GE$GE.Adjusted, CAT=CAT$CAT.Adjusted))
colnames(Prices)<-Tickers
Assets_begPrice=as.numeric(head(Prices,n=1))
Assets_Total_return = tail(Prices,1)/Assets_begPrice - 1
Assets_Total_return
##                      F         GE      CAT
## 2021-04-29 0.001785599 -0.3721098 2.013257

1.4.20 Prices-Multiple Assets (v1)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
colnames(Prices)<-Tickers

1.4.21 Prices-Multiple Assets (v2)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
Prices = as.xts(data.frame(F=F$F.Adjusted,GE=GE$GE.Adjusted, CAT=CAT$CAT.Adjusted))
colnames(Prices)<-Tickers

1.5 Describing Returns

1.5.1 Mean of Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
MeanReturn = mean(na.omit(MSFT$Return))
print(MeanReturn)
## [1] 0.001485417

1.5.2 Var.& Std.Dev. of Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
VarianceOfReturns = var(na.omit(MSFT$Return))
StdDevOfReturns = sd(na.omit(MSFT$Return))
sprintf('Variance is %s ', VarianceOfReturns)
## [1] "Variance is 0.000327936237835957 "
sprintf('Std Deviation is %s ', StdDevOfReturns)
## [1] "Std Deviation is 0.0181090098524452 "
# Can also do var(MSFT$Returns,na.rm = TRUE)

1.5.3 Skew & Kurt. of Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
SkewnessOfReturns = skewness(na.omit(MSFT$Return))
KurtosisOfReturns = kurtosis(na.omit(MSFT$Return))
sprintf('Skewness is %s ', SkewnessOfReturns)
## [1] "Skewness is -0.0151103665863959 "
sprintf('Kurtosis is %s ', KurtosisOfReturns)
## [1] "Kurtosis is 10.830787995039 "

1.5.4 Min/Max of Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
MinReturn = min(na.omit(MSFT$Return))
MaxReturn = max(na.omit(MSFT$Return))
sprintf('Min is %s ', MinReturn)
## [1] "Min is -0.147390294354279 "
sprintf('Max is %s ', MaxReturn)
## [1] "Max is 0.142168887747612 "

1.5.5 Sample Statistics

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
table.Stats(MSFT$Return)
##                    Return
## Observations    1044.0000
## NAs                1.0000
## Minimum           -0.1474
## Quartile 1        -0.0057
## Median             0.0015
## Arithmetic Mean    0.0015
## Geometric Mean     0.0013
## Quartile 3         0.0098
## Maximum            0.1422
## SE Mean            0.0006
## LCL Mean (0.95)    0.0004
## UCL Mean (0.95)    0.0026
## Variance           0.0003
## Stdev              0.0181
## Skewness          -0.0151
## Kurtosis          10.8308
table.Distributions(MSFT$Return)
##                         Return
## daily  Std Dev          0.0181
## Skewness               -0.0151
## Kurtosis               13.8308
## Excess kurtosis        10.8308
## Sample skewness        -0.0152
## Sample excess kurtosis 10.8886

1.5.6 Histogram

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
chart.Histogram(MSFT$Return)

1.5.7 Hist. with Density Overlay

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
chart.Histogram(MSFT$Return, main = "Density", breaks=40,methods = c("add.density", "add.normal"))
legend("topright", c("density", "normal"), fill=c("blue4", "blue"))

1.5.8 Histogram with VaR

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
chart.Histogram(MSFT$Return, main = "Risk Measures", methods = c("add.risk"))

1.5.9 Hist. with Risk Boundaries

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
chart.Histogram(MSFT$Return,breaks=100)
threshold = .1
abline(v=-threshold, col="red",lty="dashed")
abline(v=threshold,  col="red",lty="dashed")

1.5.10 Relative Freq. Hist.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
library("HistogramTools")
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
PlotRelativeFrequency(hist(na.omit(MSFT$Return),breaks = 40,plot=FALSE),xlab="Return",main="Relative Frequency Histogram",col = "lightgray",border = "white")

1.5.11 Order Rank

We can rank returns from big to small. The highest (or most favorable) return gets rank 1 and the lowest (or least favorable) return gets a rank equal to the number of non-missing observations (1,044 in our example).

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
library("HistogramTools")
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
MSFT$Return.Rank = rank(-as.numeric(MSFT$Return),na.last = "keep")
head(cbind(MSFT$Return,MSFT$Return.Rank))
##                   Return Return.Rank
## 2017-01-03            NA          NA
## 2017-01-04 -0.0044742609         745
## 2017-01-05  0.0000000000         594
## 2017-01-06  0.0086676985         285
## 2017-01-09 -0.0031826532         711
## 2017-01-10 -0.0003194363         608

1.5.12 Quantiles Historical Ret.

A quantile tells you the value below which a given fraction of the observations fall, so it is useful for judging how typical or extreme a given return is.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
library("HistogramTools")
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
Quantile = quantile(MSFT$Return,probs=seq(0,1,by=0.01),na.rm=TRUE)
print(Quantile[c(1:7,95:101)])
##          0%          1%          2%          3%          4%          5% 
## -0.14739038 -0.04885809 -0.04016747 -0.03185859 -0.02954018 -0.02781362 
##          6%         94%         95%         96%         97%         98% 
## -0.02437219  0.02428265  0.02583972  0.02848786  0.03296667  0.03736686 
##         99%        100% 
##  0.06078512  0.14216887

1.5.13 Find Date of Quantile

What is the date of the 80th percentile observation?

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
library("HistogramTools")
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# calculate 80th percentage quantile return
Quantile80 = quantile(MSFT$Return,probs=0.8,na.rm=TRUE)
# find the data index with closest return to Quantile80
index80 = which.min(abs(Quantile80-MSFT$Return))
cat(paste("\n\nThe 80th percentile price:",Quantile80,"\nThe date found is:",index(MSFT)[index80],"\nThat day's return is:",MSFT$Return[index80]))
## 
## 
## The 80th percentile return: 0.0117884391742921 
## The date found is: 2019-11-08 
## That day's return is: 0.011784451267419

1.5.14 Percentile of Obs. on Date

What is the percentile of the observation on Mar.10, 2020?

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
library("HistogramTools")
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
TargetReturn = MSFT$Return["2020-03-10"]
# calculate its quantile
# estimated quantile = share of returns strictly below the target
EstimatedQuantile = sum(na.omit(MSFT$Return)<as.numeric(TargetReturn))/length(na.omit(MSFT$Return))
cat(paste("\n\nThe return on Mar 10 2020:",TargetReturn,"\nThe estimated quantile :",EstimatedQuantile))
## 
## 
## The return on Mar 10 2020: 0.068383787609047 
## The estimated quantile : 0.993295019157088
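Equivalently, base R's empirical CDF gives essentially the same answer; note that ecdf counts observations less than or equal to the target, so it can differ from the strict count above by one observation:

Fn <- ecdf(as.numeric(na.omit(MSFT$Return))) # empirical CDF of the return sample
Fn(as.numeric(TargetReturn))                 # estimated P(R <= target)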

1.5.15 Visualize Outliers

This is a simple heuristic, not a formal rule. Let your initial EDA guide the threshold you choose.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
library("HistogramTools")
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
threshold = .1 # use your discretion in setting this
plot(x=1:length(MSFT$Return),y=MSFT$Return,xlab="Index",ylab = "Return",main="Adjusted Simple Return",col=ifelse(abs(MSFT$Return) >= threshold, "red", "darkblue"))

1.5.16 Boxplot of Outliers

The observations marked in red are potential outliers since they lie well above the 75th percentile or well below the 25th percentile (beyond the boxplot whiskers).

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
chart.Boxplot(MSFT$Return,outlier.symbol="*",symbol.color =c("darkblue"),outcol="red")

1.5.17 Compute Z Scores

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# calculate z-scores
MSFT = na.omit(MSFT)
MSFT$Return.Zscore = (MSFT$Return-mean(MSFT$Return))/sd(MSFT$Return)
head(MSFT$Return.Zscore)
##            Return.Zscore
## 2017-01-04   -0.32911052
## 2017-01-05   -0.08202640
## 2017-01-06    0.39660886
## 2017-01-09   -0.25776852
## 2017-01-10   -0.09966223
## 2017-01-11    0.42063656
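
Base R's scale() performs the same centering and standardization in a single call. A minimal sketch, assuming the MSFT object from the block above; the Return.Zscore2 column is a hypothetical name added only for comparison:

MSFT$Return.Zscore2 = as.numeric(scale(as.numeric(MSFT$Return))) # (x - mean)/sd in one step
head(cbind(MSFT$Return.Zscore,MSFT$Return.Zscore2)) # the two columns should agree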

1.5.18 DeMean

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return<-MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
MSFT = na.omit(MSFT)
MSFT$ReturnDeMean<-MSFT$Return-mean(MSFT$Return)
todisplay<-cbind(MSFT$Return,MSFT$ReturnDeMean)
head(todisplay)
##                   Return ReturnDeMean
## 2017-01-04 -0.0044741406 -0.005959558
## 2017-01-05  0.0000000000 -0.001485417
## 2017-01-06  0.0086674903  0.007182073
## 2017-01-09 -0.0031824478 -0.004667865
## 2017-01-10 -0.0003195051 -0.001804923
## 2017-01-11  0.0091026107  0.007617193

1.5.19 ReScale

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
upper = 1
lower = 0
MSFT$Return<-MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
MSFT = na.omit(MSFT)
MSFT$ReScaledReturn = lower+(MSFT$Return-min(MSFT$Return))*(upper-lower)/(max(MSFT$Return)-min(MSFT$Return)) # min-max scaling to [lower, upper]
#MSFT$ReScaledPrice = lower+(MSFT$MSFT.Adjusted-min(MSFT$MSFT.Adjusted))*(upper-lower)/(max(MSFT$MSFT.Adjusted)-min(MSFT$MSFT.Adjusted))
head(MSFT)
##            MSFT.Open MSFT.High MSFT.Low MSFT.Close MSFT.Volume MSFT.Adjusted
## 2017-01-04     62.48     62.75    62.12      62.30    21340000      57.80566
## 2017-01-05     62.19     62.66    62.03      62.30    24876000      57.80566
## 2017-01-06     62.30     63.15    62.04      62.84    19922900      58.30670
## 2017-01-09     62.76     63.08    62.54      62.64    20382700      58.12112
## 2017-01-10     62.73     63.07    62.28      62.62    18593000      58.10257
## 2017-01-11     62.61     63.23    62.43      63.19    21517300      58.63144
##                   Return ReScaledReturn
## 2017-01-04 -0.0044742095      0.4935643
## 2017-01-05  0.0000000000      0.5090161
## 2017-01-06  0.0086676985      0.5389502
## 2017-01-09 -0.0031827904      0.4980242
## 2017-01-10 -0.0003191611      0.5079139
## 2017-01-11  0.0091023340      0.5404512
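
As a quick sanity check on the min-max rescaling, the rescaled series should span exactly [lower, upper]; a one-line sketch using the objects above:

range(MSFT$ReScaledReturn) # should return 0 and 1 given lower = 0, upper = 1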

1.5.20 Compute IQR

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# drop the NA created by the lagged return
MSFT = na.omit(MSFT)
# calculate the IQR of historical returns
Q3=quantile(MSFT$Return,probs = 0.75)
Q1=quantile(MSFT$Return,probs = 0.25)
IQR = Q3 - Q1 # the "75%" label in the printout below is inherited from quantile()
IQR
##        75% 
## 0.01550025
# pick observations beyond 2*IQR from the Q1 and Q3
IQR_threshold = 2
IQR_outlier_ind =  which(MSFT$Return>Q3+IQR_threshold*IQR|MSFT$Return<Q1-IQR_threshold*IQR)
# print outliers
print(MSFT$Return[IQR_outlier_ind])
##                 Return
## 2017-10-27  0.06411897
## 2017-12-04 -0.03774028
## 2018-02-05 -0.04118512
## 2018-02-08 -0.05133347
## 2018-03-26  0.07570538
## 2018-03-27 -0.04595871
## 2018-10-10 -0.05433819
## 2018-10-24 -0.05346882
## 2018-10-25  0.05844396
## 2018-12-07 -0.04002181
## 2018-12-24 -0.04173873
## 2018-12-26  0.06830979
## 2019-01-03 -0.03678824
## 2019-01-04  0.04650942
## 2020-02-24 -0.04311538
## 2020-02-27 -0.07045878
## 2020-03-02  0.06653917
## 2020-03-03 -0.04791943
## 2020-03-09 -0.06777264
## 2020-03-10  0.06838411
## 2020-03-11 -0.04530201
## 2020-03-12 -0.09483820
## 2020-03-13  0.14216876
## 2020-03-16 -0.14739029
## 2020-03-17  0.08233657
## 2020-03-18 -0.04209612
## 2020-03-20 -0.03755873
## 2020-03-24  0.09089555
## 2020-03-26  0.06255128
## 2020-03-27 -0.04106110
## 2020-03-30  0.07034087
## 2020-04-06  0.07436769
## 2020-04-14  0.04948328
## 2020-04-21 -0.04135721
## 2020-04-29  0.04487355
## 2020-06-11 -0.05369837
## 2020-07-20  0.04298099
## 2020-07-23 -0.04349472
## 2020-08-03  0.05624121
## 2020-09-03 -0.06194681
## 2020-09-08 -0.05409560
## 2020-09-09  0.04258345
## 2020-10-28 -0.04956626
## 2020-11-04  0.04824887
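
Base R also ships an IQR() function that computes the same spread directly; the manual Q3 - Q1 is kept above because both pieces are reused for the outlier bounds. A one-line sketch (namespaced, since IQR was reused as a variable name above):

stats::IQR(as.numeric(MSFT$Return)) # should equal Q3 - Q1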

1.5.21 Drop Outliers

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# drop the NA created by the lagged return
MSFT = na.omit(MSFT)
# calculate the IQR of historical returns
Q3=quantile(MSFT$Return,probs = 0.75)
Q1=quantile(MSFT$Return,probs = 0.25)
IQR = Q3 - Q1
# pick observations beyond 2*IQR from the Q1 and Q3
IQR_threshold = 2
IQR_outlier_ind =  which(MSFT$Return>Q3+IQR_threshold*IQR|MSFT$Return<Q1-IQR_threshold*IQR)
clean_MSFT = MSFT[-IQR_outlier_ind]

1.5.22 Replace Outliers with Mean

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# drop the NA created by the lagged return
MSFT = na.omit(MSFT)
# calculate the IQR of historical returns
Q3=quantile(MSFT$Return,probs = 0.75)
Q1=quantile(MSFT$Return,probs = 0.25)
IQR = Q3 - Q1
# pick observations beyond 2*IQR from the Q1 and Q3
IQR_threshold = 2
IQR_outlier_ind =  which(MSFT$Return>Q3+IQR_threshold*IQR|MSFT$Return<Q1-IQR_threshold*IQR)
MSFT$CleanReturn= MSFT$Return
MSFT$CleanReturn[IQR_outlier_ind]<-mean(MSFT$Return)
head(MSFT[IQR_outlier_ind])
##            MSFT.Open MSFT.High MSFT.Low MSFT.Close MSFT.Volume MSFT.Adjusted
## 2017-10-27     84.37     86.20    83.61      83.81    71066700      79.10301
## 2017-12-04     84.42     84.43    80.70      81.08    39094900      76.91067
## 2018-02-05     90.56     93.24    88.00      88.00    51031500      83.47482
## 2018-02-08     89.71     89.88    84.76      85.01    55628700      80.63857
## 2018-03-26     90.61     94.00    90.40      93.78    56396800      89.37547
## 2018-03-27     94.94     95.14    88.51      89.47    56569000      85.26790
##                 Return CleanReturn
## 2017-10-27  0.06411886 0.001485417
## 2017-12-04 -0.03774019 0.001485417
## 2018-02-05 -0.04118547 0.001485417
## 2018-02-08 -0.05133355 0.001485417
## 2018-03-26  0.07570519 0.001485417
## 2018-03-27 -0.04595854 0.001485417

1.5.23 Replace Outliers with Quant.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# drop the NA created by the lagged return
MSFT = na.omit(MSFT)
# calculate the IQR of historical returns
Q3=quantile(MSFT$Return,probs = 0.75)
Q1=quantile(MSFT$Return,probs = 0.25)
IQR = Q3 - Q1
# pick observations beyond 2*IQR from the Q1 and Q3
IQR_threshold = 2
IQR_outlier_ind =  which(MSFT$Return>Q3+IQR_threshold*IQR|MSFT$Return<Q1-IQR_threshold*IQR)
trimfun <- function(x){
    quantiles <- quantile( x, c(.25, .75 ) )
    x[ x < quantiles[1] ] <- quantiles[1]
    x[ x > quantiles[2] ] <- quantiles[2]
    x
}
MSFT$CleanReturn = trimfun(MSFT$Return)

Note that trimfun() winsorizes the entire series at the 25th and 75th percentiles, not just the IQR-flagged observations above. After applying it, the min and max of the cleaned series equal Q1 and Q3, respectively, as the quick check below confirms.
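
A minimal sketch of that check, using the objects defined above:

c(min = min(MSFT$CleanReturn), max = max(MSFT$CleanReturn))
c(Q1, Q3) # the winsorized min and max should match these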

1.5.24 Rolling Mean Return

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# drop the NA created by the lagged return
MSFT = na.omit(MSFT)
library(PerformanceAnalytics)
width_n = 250 # window length = 1 year
by_n = 1 # update(recalculate) frequency = everyday 
MSFT$RollingReturn250D = apply.rolling(MSFT$Return, width = width_n, by = by_n,FUN="mean")
tail(cbind(MSFT$Return,MSFT$RollingReturn250D))
##                  Return RollingReturn250D
## 2021-02-19 -0.011567249       0.001796962
## 2021-02-22 -0.026808318       0.001755736
## 2021-02-23 -0.005287555       0.001684606
## 2021-02-24  0.005487186       0.001988392
## 2021-02-25 -0.023705024       0.001796720
## 2021-02-26  0.014804206       0.001589780
chart_Series(MSFT$RollingReturn250D, name = "Mean Daily Return over trailing 250D")
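
zoo's rollapply() (already loaded via xts) is a common alternative to apply.rolling and should produce the same trailing mean up to edge handling. A minimal sketch, assuming the objects above; RollMean2 is a hypothetical comparison column:

MSFT$RollMean2 = rollapply(MSFT$Return, width = 250, FUN = mean, align = "right", fill = NA)
tail(cbind(MSFT$RollingReturn250D,MSFT$RollMean2)) # the two columns should agree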

1.5.25 Rolling Std of Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# drop the NA created by the lagged return
MSFT = na.omit(MSFT)
library(PerformanceAnalytics)
width_n = 250 # window length = 1 year
by_n = 1 # update(recalculate) frequency = everyday 
MSFT$RollingSD = apply.rolling(MSFT$Return, width = width_n, by = by_n,FUN="sd")
chart_Series(MSFT$RollingSD, name = "Sigma over trailing 250D")

1.5.26 Rolling Skewness of Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# drop the NA created by the lagged return
MSFT = na.omit(MSFT)
library(PerformanceAnalytics)
width_n = 250 # window length = 1 year
by_n = 1 # update(recalculate) frequency = everyday 
MSFT$RollingSkew = apply.rolling(MSFT$Return, width = width_n, by = by_n,FUN="skewness")
chart_Series(MSFT$RollingSkew, name = "Skew over trailing 250D")

1.5.27 Rolling Kurtosis of Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# drop the NA created by the lagged return
MSFT = na.omit(MSFT)
library(PerformanceAnalytics)
width_n = 250 # window length = 1 year
by_n = 1 # update(recalculate) frequency = everyday 
MSFT$RollingKurt = apply.rolling(MSFT$Return, width = width_n, by = by_n,FUN="kurtosis")
chart_Series(MSFT$RollingKurt, name = "Kurtosis over trailing 250D")

1.5.28 Rolling Total Return

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return = MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
# drop the NA created by the lagged return
MSFT = na.omit(MSFT)
library(PerformanceAnalytics)
width_n = 250 # window length 
by_n = 1 # update(recalculate) frequency = everyday 
MSFT$GrossReturn<-MSFT$Return+1
MSFT$RollingTR = apply.rolling(MSFT$GrossReturn, width = width_n, by = by_n,FUN="prod")-1
chart_Series(MSFT$RollingTR)

1.5.29 Create Calendar Dummies

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
Returns = periodReturn(MSFT$MSFT.Adjusted,period = 'daily',type = 'log')
library(lubridate)
Returns$Q1<-ifelse(quarter(index(Returns))==1,1,0)
plot(Returns$Q1)

Returns$Summer<- ifelse(month(index(Returns)) %in% c(6,7,8),1,0)
plot(Returns$Summer)
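
The same pattern extends to any calendar unit lubridate can extract. For example, a sketch of a hypothetical Monday dummy for day-of-week effects (week_start = 1 makes Monday day 1):

Returns$Monday<-ifelse(wday(index(Returns),week_start=1)==1,1,0)
plot(Returns$Monday)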

1.5.30 Create Dummies by Value

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
Returns = periodReturn(MSFT$MSFT.Adjusted,period = 'daily',type = 'log')
colnames(Returns)<-ticker
Quant75=quantile(Returns,.75)
Returns$Big<-ifelse(Returns>Quant75,1,0)
head(Returns[Returns$MSFT>Quant75])
##                  MSFT Big
## 2017-01-27 0.02322285   1
## 2017-02-09 0.01130305   1
## 2017-02-13 0.01118728   1
## 2017-03-01 0.01489316   1
## 2017-03-22 0.01268975   1
## 2017-04-21 0.01364681   1
head(Returns[Returns$MSFT<Quant75])
##                     MSFT Big
## 2017-01-03  0.0000000000   0
## 2017-01-04 -0.0044842320   0
## 2017-01-05  0.0000000000   0
## 2017-01-06  0.0086304708   0
## 2017-01-09 -0.0031875910   0
## 2017-01-10 -0.0003196939   0

1.5.31 Test Equal Mean Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
t.test(Returns$DAX,Returns$CAC,var.equal=TRUE)
## 
##  Two Sample t-test
## 
## data:  Returns$DAX and Returns$CAC
## t = 0.61417, df = 3716, p-value = 0.5391
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -0.0004713134  0.0009012890
## sample estimates:
##    mean of x    mean of y 
## 0.0006520417 0.0004370540
t.test(Returns$DAX,Returns$CAC) # If variances unequal
## 
##  Welch Two Sample t-test
## 
## data:  Returns$DAX and Returns$CAC
## t = 0.61417, df = 3698.7, p-value = 0.5391
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -0.0004713145  0.0009012900
## sample estimates:
##    mean of x    mean of y 
## 0.0006520417 0.0004370540
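
Because the DAX and CAC returns are observed on the same dates, a paired t-test, which accounts for their contemporaneous correlation, is a natural alternative; a minimal sketch with the same Returns data frame:

t.test(Returns$DAX,Returns$CAC,paired=TRUE)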

1.5.32 Test Mean Ret. One-Sided

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
t.test(Returns$DAX,Returns$CAC,var.equal=TRUE,alternative = 'greater')
## 
##  Two Sample t-test
## 
## data:  Returns$DAX and Returns$CAC
## t = 0.61417, df = 3716, p-value = 0.2696
## alternative hypothesis: true difference in means is greater than 0
## 95 percent confidence interval:
##  -0.0003609303           Inf
## sample estimates:
##    mean of x    mean of y 
## 0.0006520417 0.0004370540

1.5.33 Test Equal Variance Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
Obs<-index(Returns)
Returns$Obs<-Obs
Returns<-subset(Returns,select=-c(SMI,FTSE))
StackedReturns<-cbind(Returns$Obs,stack(Returns[1:2]))
var.test(values~ind,data=StackedReturns)
## 
##  F test to compare two variances
## 
## data:  values by ind
## F = 0.87202, num df = 1858, denom df = 1858, p-value = 0.003178
## alternative hypothesis: true ratio of variances is not equal to 1
## 95 percent confidence interval:
##  0.7961927 0.9550629
## sample estimates:
## ratio of variances 
##          0.8720172
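
The stacking above serves the formula interface; var.test() also accepts two return vectors directly, which reaches the same F statistic with less bookkeeping. A sketch using the same Returns data frame:

var.test(Returns$DAX,Returns$CAC)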

1.5.34 Return Correlation

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
ReturnCorMatrix = cor(Returns)
print(ReturnCorMatrix)
##            F        GE       CAT
## F   1.000000 0.5160320 0.5337420
## GE  0.516032 1.0000000 0.4983247
## CAT 0.533742 0.4983247 1.0000000

1.5.35 Visualize Correlations (v1)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PerformanceAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
chart.Correlation(Returns)

1.5.36 Visualize Correlations (v2)

The corrplot() function from the corrplot package draws a variety of correlation graphs; see the package documentation for more examples.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(corrplot)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
ReturnCorMatrix = cor(Returns)
corrplot(ReturnCorMatrix, method="circle")

1.5.37 Rolling Correlations

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(corrplot)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
cor2 <- function(x) cor(x[,1], x[,2])
# correlation over a trailing 60-day window
RollCorM60D = rollapply(data = Returns[,c("F","GE")], width = 60,FUN=cor2, by.column = FALSE)
names(RollCorM60D) = "60D"

1.5.38 Covariance of Returns

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(corrplot)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
CovMatrix<-cov(Returns)
CovMatrix
##                F           GE          CAT
## F   0.0004277850 0.0002526818 0.0002115286
## GE  0.0002526818 0.0005604924 0.0002260591
## CAT 0.0002115286 0.0002260591 0.0003671553

2 Explain

2.1 Seasonality & Smoothing

2.1.1 Seasonal Decomposition

rm(list=ls()) # clear workspace
cat("\014")  # clear console
ticker="FRED/TOTALNSA"
startdate<-"1980-01-01"
library(Quandl)
quandl_api_key<-read.csv("../data/quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(quandl_api_key)
VehicleNSA <- Quandl(ticker,start_date=startdate,type="ts")
plot(VehicleNSA)

decomp<-decompose(VehicleNSA)
plot(decomp)

2.1.2 Seasonal Adjustment

rm(list=ls()) # clear workspace
cat("\014")  # clear console
ticker="FRED/TOTALNSA"
startdate<-"1980-01-01"
library(Quandl)
quandl_api_key<-read.csv("../data/quandlkey.csv",stringsAsFactors=FALSE)$Key # same key file as in 2.1.1
Quandl.api_key(quandl_api_key)
VehicleNSA=Quandl(ticker,start_date=startdate,type="ts")
plot(VehicleNSA)

decomp<-decompose(VehicleNSA)
VehicleSA<-decomp$x-decomp$seasonal
Vehicle<-cbind(as.xts(VehicleNSA),as.xts(VehicleSA))
colnames(Vehicle)<-c("NSA","SA")
plot(Vehicle,legend.loc = "topleft")

2.1.3 Simple Moving Average

rm(list=ls()) # clear workspace
cat("\014")  # clear console
ticker="FRED/TOTALNSA"
startdate<-"1980-01-01"
library(Quandl)
quandl_api_key<-read.csv("../data/quandlkey.csv",stringsAsFactors=FALSE)$Key # same key file as in 2.1.1
Quandl.api_key(quandl_api_key)
VehicleNSA=Quandl(ticker,start_date=startdate,type="ts")
library(TTR)
Smoothed<-SMA(VehicleNSA,n=12)
toplot<-cbind(as.xts(VehicleNSA),as.xts(Smoothed))
colnames(toplot)<-c("NSA","Smoothed")
plot(toplot,legend.loc="topleft")

2.2 Regression

2.2.1 Univariate Regression

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC,Returns) #Run Regression
print(summary(reg1))
## 
## Call:
## lm(formula = DAX ~ CAC, data = Returns)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.044676 -0.004302 -0.000076  0.004093  0.040100 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 0.0003523  0.0001623    2.17   0.0301 *  
## CAC         0.6858248  0.0147070   46.63   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.006993 on 1857 degrees of freedom
## Multiple R-squared:  0.5394, Adjusted R-squared:  0.5391 
## F-statistic:  2175 on 1 and 1857 DF,  p-value: < 2.2e-16

2.2.2 Multivariate Regression

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC+FTSE,Returns) #Run Regression
print(summary(reg1))
## 
## Call:
## lm(formula = DAX ~ CAC + FTSE, data = Returns)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.046142 -0.003848 -0.000009  0.003987  0.033370 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 0.0002694  0.0001542   1.747   0.0807 .  
## CAC         0.5152838  0.0183380  28.099   <2e-16 ***
## FTSE        0.3644971  0.0254199  14.339   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.006637 on 1856 degrees of freedom
## Multiple R-squared:  0.5853, Adjusted R-squared:  0.5849 
## F-statistic:  1310 on 2 and 1856 DF,  p-value: < 2.2e-16

2.2.3 Regress w/Interactions

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC:FTSE,Returns) #Run Regression
reg2<-lm(DAX~CAC*FTSE,Returns) # Run Regression
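
In the formula interface, CAC:FTSE adds only the product term, while CAC*FTSE expands to CAC + FTSE + CAC:FTSE. Comparing the coefficient names makes the difference concrete; a sketch using reg1 and reg2 from above:

names(coef(reg1)) # "(Intercept)" and "CAC:FTSE" only
names(coef(reg2)) # "(Intercept)", "CAC", "FTSE", and "CAC:FTSE"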

2.2.4 Regress w/Calendar Dummy

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
Tickers = c('MSFT','GE') # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(Tickers,from = start_date,to = end_date, src=data_src)
## [1] "MSFT" "GE"
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='log')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
library(lubridate)
Returns$Q1<-ifelse(quarter(index(Returns))==1,1,0)
reg1=lm(MSFT~GE*Q1,data=Returns)
print(summary(reg1))
## 
## Call:
## lm(formula = MSFT ~ GE * Q1, data = Returns)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.088806 -0.007493  0.000016  0.008366  0.095418 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  1.460e-03  5.973e-04   2.445   0.0147 *  
## GE           9.620e-02  2.342e-02   4.108  4.3e-05 ***
## Q1          -2.437e-05  1.147e-03  -0.021   0.9831    
## GE:Q1        3.423e-01  3.848e-02   8.894  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.01647 on 1040 degrees of freedom
## Multiple R-squared:  0.1766, Adjusted R-squared:  0.1742 
## F-statistic: 74.36 on 3 and 1040 DF,  p-value: < 2.2e-16

2.2.5 Regression Plots

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC,Returns) #Run Regression
plot(DAX~CAC,data=Returns)
abline(reg1)

2.2.6 Regression Plots(v2)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC,Returns) #Run Regression
#par(mfrow=c(3,2))
plot(reg1,which=c(1))

plot(reg1,which=c(2))

plot(reg1,which=c(3))

plot(reg1,which=c(4))

plot(reg1,which=c(5))

plot(reg1,which=c(6))

2.2.7 Extract Regress. Coef.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC,Returns) #Run Regression
print('Coefficients')
## [1] "Coefficients"
coef(reg1)
##  (Intercept)          CAC 
## 0.0003522993 0.6858247625
alpha=coef(reg1)[1]
beta=coef(reg1)[2]
print('Confidence intervals')
## [1] "Confidence intervals"
confint(reg1)
##                    2.5 %      97.5 %
## (Intercept) 3.396064e-05 0.000670638
## CAC         6.569808e-01 0.714668761
print('Covariance')
## [1] "Covariance"
vcov(reg1)
##               (Intercept)           CAC
## (Intercept)  2.634610e-08 -9.453302e-08
## CAC         -9.453302e-08  2.162960e-04

2.2.8 Extract Regression Residuals

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC,Returns) #Run Regression
Errors<-resid(reg1)
plot(Errors)

#or
Errors2<-Returns$DAX-reg1$fitted.values
head(cbind(Errors,Errors2))
##          Errors       Errors2
## 2 -0.0009971609 -0.0009971609
## 3  0.0080783190  0.0080783190
## 4  0.0126150011  0.0126150011
## 5 -0.0081269243 -0.0081269243
## 6 -0.0015174787 -0.0015174787
## 7  0.0040407495  0.0040407495

2.2.9 Regression Fitted Values

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC,Returns) #Run Regression
Estimates<-fitted(reg1)
#or
Estimates2<-coef(reg1)[1]+coef(reg1)[2]*Returns$CAC
head(cbind(Estimates,Estimates2))
##      Estimates   Estimates2
## 2 -0.008329389 -0.008329389
## 3 -0.012500494 -0.012500494
## 4 -0.003611207 -0.003611207
## 5  0.006348707  0.006348707
## 6 -0.003159233 -0.003159233
## 7  0.008386293  0.008386293

2.2.10 Regression Predictions

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
newCAC<-data.frame(CAC=seq(-.05,.05,.01))
PredictedModel<-predict(lm(DAX~CAC,Returns),newCAC,se.fit=TRUE)
cbind(newCAC,PredictedModel$fit,PredictedModel$se.fit)
##      CAC PredictedModel$fit PredictedModel$se.fit
## 1  -0.05      -0.0339389388          0.0007593019
## 2  -0.04      -0.0270806912          0.0006164270
## 3  -0.03      -0.0202224436          0.0004761139
## 4  -0.02      -0.0133641959          0.0003415345
## 5  -0.01      -0.0065059483          0.0002233078
## 6   0.00       0.0003522993          0.0001623148
## 7   0.01       0.0072105469          0.0002146743
## 8   0.02       0.0140687946          0.0003302774
## 9   0.03       0.0209270422          0.0004640479
## 10  0.04       0.0277852898          0.0006040340
## 11  0.05       0.0346435374          0.0007467481
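
predict() can also return interval estimates directly rather than just standard errors; a sketch producing 95% prediction intervals over the same grid:

predict(lm(DAX~CAC,Returns),newCAC,interval="prediction",level=0.95)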

2.2.11 Test Parameter Significance

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC,Returns) #Run Regression
print(summary(reg1))
## 
## Call:
## lm(formula = DAX ~ CAC, data = Returns)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.044676 -0.004302 -0.000076  0.004093  0.040100 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 0.0003523  0.0001623    2.17   0.0301 *  
## CAC         0.6858248  0.0147070   46.63   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.006993 on 1857 degrees of freedom
## Multiple R-squared:  0.5394, Adjusted R-squared:  0.5391 
## F-statistic:  2175 on 1 and 1857 DF,  p-value: < 2.2e-16
h0=0 # set to your hypothesized value
tstat = (summary(reg1)$coefficients[1,1]-h0)/summary(reg1)$coefficients[1,2]
pvalue = pt(q=abs(tstat),df = reg1$df,lower.tail = FALSE)*2
pvalue
## [1] 0.03009767

2.2.12 Test Param. Sig. One-sided

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC,Returns) #Run Regression
print(summary(reg1))
## 
## Call:
## lm(formula = DAX ~ CAC, data = Returns)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.044676 -0.004302 -0.000076  0.004093  0.040100 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 0.0003523  0.0001623    2.17   0.0301 *  
## CAC         0.6858248  0.0147070   46.63   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.006993 on 1857 degrees of freedom
## Multiple R-squared:  0.5394, Adjusted R-squared:  0.5391 
## F-statistic:  2175 on 1 and 1857 DF,  p-value: < 2.2e-16
print('H0: intercept >= 0 (HA: intercept < 0)')
## [1] "H0: intercept >= 0 (HA: intercept < 0)"
h0=0 # set to your hypothesized value
tstat = (summary(reg1)$coefficients[1,1]-h0)/summary(reg1)$coefficients[1,2]
pvalue = pt(q=tstat,df = reg1$df,lower.tail = TRUE)
pvalue
## [1] 0.9849512
print('H0: intercept <= 0 (HA: intercept > 0)')
## [1] "H0: intercept <= 0 (HA: intercept > 0)"
h0=0 # set to your hypothesized value
tstat = (summary(reg1)$coefficients[1,1]-h0)/summary(reg1)$coefficients[1,2]
pvalue = 1-pt(q=tstat,df = reg1$df,lower.tail = TRUE)
pvalue
## [1] 0.01504883
pvalue = pt(q=tstat,df = reg1$df,lower.tail = FALSE)
pvalue
## [1] 0.01504883

2.2.13 Test Joint Hypothesis

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC+FTSE,Returns) #Run Regression
print(summary(reg1))
## 
## Call:
## lm(formula = DAX ~ CAC + FTSE, data = Returns)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.046142 -0.003848 -0.000009  0.003987  0.033370 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 0.0002694  0.0001542   1.747   0.0807 .  
## CAC         0.5152838  0.0183380  28.099   <2e-16 ***
## FTSE        0.3644971  0.0254199  14.339   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.006637 on 1856 degrees of freedom
## Multiple R-squared:  0.5853, Adjusted R-squared:  0.5849 
## F-statistic:  1310 on 2 and 1856 DF,  p-value: < 2.2e-16
library(car)
# Test if the coefficients are equal
linearHypothesis(reg1,c("CAC=FTSE"))
## Linear hypothesis test
## 
## Hypothesis:
## CAC - FTSE = 0
## 
## Model 1: restricted model
## Model 2: DAX ~ CAC + FTSE
## 
##   Res.Df      RSS Df  Sum of Sq      F    Pr(>F)    
## 1   1857 0.082383                                   
## 2   1856 0.081752  1 0.00063101 14.326 0.0001586 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Test if the coefficients take specific values jointly
linearHypothesis(reg1,c("CAC=0","FTSE=.36"))
## Linear hypothesis test
## 
## Hypothesis:
## CAC = 0
## FTSE = 0.36
## 
## Model 1: restricted model
## Model 2: DAX ~ CAC + FTSE
## 
##   Res.Df      RSS Df Sum of Sq   F    Pr(>F)    
## 1   1858 0.142273                               
## 2   1856 0.081752  2  0.060521 687 < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

2.2.14 Test Joint Hyp. Wald

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC,Returns) #Restricted
reg2<-lm(DAX~.,Returns) #Unrestricted
anova(reg1,reg2)
## Analysis of Variance Table
## 
## Model 1: DAX ~ CAC
## Model 2: DAX ~ SMI + CAC + FTSE
##   Res.Df      RSS Df Sum of Sq      F    Pr(>F)    
## 1   1857 0.090808                                  
## 2   1855 0.067909  2    0.0229 312.76 < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

2.2.15 Variance Infl. Factor

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~.,Returns) #Regress DAX on all remaining indices ("." = every column except DAX)
library(car)
vif(reg1)
##      SMI      CAC     FTSE 
## 1.781686 2.023629 1.908167
#A VIF above ~5 (some use 10) suggests problematic multicollinearity

2.2.16 BPTest-Heteroscedasticity

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~.,Returns) #Regress DAX on all remaining indices ("." = every column except DAX)
library(lmtest)
#H0:Homoscedasticity (i.e. var(residuals)=constant)
bptest(reg1)
## 
##  studentized Breusch-Pagan test
## 
## data:  reg1
## BP = 7.3734, df = 3, p-value = 0.0609

2.2.17 White StdErr. Hetero.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC+FTSE,Returns) #Run Regression
print(summary(reg1))
## 
## Call:
## lm(formula = DAX ~ CAC + FTSE, data = Returns)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.046142 -0.003848 -0.000009  0.003987  0.033370 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 0.0002694  0.0001542   1.747   0.0807 .  
## CAC         0.5152838  0.0183380  28.099   <2e-16 ***
## FTSE        0.3644971  0.0254199  14.339   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.006637 on 1856 degrees of freedom
## Multiple R-squared:  0.5853, Adjusted R-squared:  0.5849 
## F-statistic:  1310 on 2 and 1856 DF,  p-value: < 2.2e-16
library(car)
# Examine CAC 
linearHypothesis(reg1,c("CAC=0"))
## Linear hypothesis test
## 
## Hypothesis:
## CAC = 0
## 
## Model 1: restricted model
## Model 2: DAX ~ CAC + FTSE
## 
##   Res.Df      RSS Df Sum of Sq      F    Pr(>F)    
## 1   1857 0.116530                                  
## 2   1856 0.081752  1  0.034778 789.56 < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Examine CAC with hetero robust std errors
linearHypothesis(reg1,c("CAC=0"),white.adjust = TRUE)
## Linear hypothesis test
## 
## Hypothesis:
## CAC = 0
## 
## Model 1: restricted model
## Model 2: DAX ~ CAC + FTSE
## 
## Note: Coefficient covariance matrix supplied.
## 
##   Res.Df Df      F    Pr(>F)    
## 1   1857                        
## 2   1856  1 274.82 < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

2.2.18 Hetero.Robust T-Test

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC+FTSE,Returns) #Run Regression
print(summary(reg1))
## 
## Call:
## lm(formula = DAX ~ CAC + FTSE, data = Returns)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.046142 -0.003848 -0.000009  0.003987  0.033370 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 0.0002694  0.0001542   1.747   0.0807 .  
## CAC         0.5152838  0.0183380  28.099   <2e-16 ***
## FTSE        0.3644971  0.0254199  14.339   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.006637 on 1856 degrees of freedom
## Multiple R-squared:  0.5853, Adjusted R-squared:  0.5849 
## F-statistic:  1310 on 2 and 1856 DF,  p-value: < 2.2e-16
library("lmtest")
library("sandwich")
coeftest(reg1, vcov = vcovHC(reg1, type = "HC0"))
## 
## t test of coefficients:
## 
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 0.00026938 0.00015441  1.7446  0.08122 .  
## CAC         0.51528381 0.03061855 16.8291  < 2e-16 ***
## FTSE        0.36449714 0.03611028 10.0940  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

2.2.19 Hetero.Robust Wald Test

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC,Returns) #Restricted
reg2<-lm(DAX~.,Returns) #Unrestricted
anova(reg1,reg2)
## Analysis of Variance Table
## 
## Model 1: DAX ~ CAC
## Model 2: DAX ~ SMI + CAC + FTSE
##   Res.Df      RSS Df Sum of Sq      F    Pr(>F)    
## 1   1857 0.090808                                  
## 2   1855 0.067909  2    0.0229 312.76 < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
library(lmtest)
library(sandwich)
waldtest(reg1, reg2, vcov = vcovHC(reg2, type = "HC0"))
## Wald test
## 
## Model 1: DAX ~ CAC
## Model 2: DAX ~ SMI + CAC + FTSE
##   Res.Df Df     F    Pr(>F)    
## 1   1857                       
## 2   1855  2 170.3 < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

2.2.20 Test for serial correlation

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC+FTSE,Returns) #Run Regression
library(lmtest)
dwtest(reg1) #Durbin Watson
## 
##  Durbin-Watson test
## 
## data:  reg1
## DW = 1.9577, p-value = 0.1799
## alternative hypothesis: true autocorrelation is greater than 0

2.2.21 Test for serial correlation 2

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC+FTSE,Returns) #Run Regression
library(lmtest)
#perform Breusch-Godfrey test
# H0: no serial correlation up to indicated order
bgtest(reg1, order=3)
## 
##  Breusch-Godfrey test for serial correlation of order up to 3
## 
## data:  reg1
## LM test = 0.80293, df = 3, p-value = 0.8488

2.2.22 Test for serial correlation 3

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC+FTSE,Returns) #Run Regression
library(lmtest)
# H0: errors are independent; no serial correlation
Box.test(reg1$residuals, lag=5,type="Ljung")
## 
##  Box-Ljung test
## 
## data:  reg1$residuals
## X-squared = 0.9576, df = 5, p-value = 0.9659

2.2.23 Fixing Serial correlation

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(PortfolioAnalytics)
data("EuStockMarkets")
Prices<-as.data.frame(EuStockMarkets)
T = dim(Prices)[1]
Returns<-log(Prices[2:T,]/Prices[1:T-1,])
reg1<-lm(DAX~CAC+FTSE,Returns) #Run Regression
reg1summary<-summary(reg1)
#Here we use Newey-West (HAC) robust standard errors. prewhite and adjust are set to FALSE and TRUE, respectively, so the standard HAC formula is used with a small-sample adjustment.
  #https://www.econometrics-with-r.org/15-4-hac-standard-errors.html
library(sandwich)
NW_VCOV <- NeweyWest(reg1, prewhite = F, adjust = T)

#compute standard errors 
hac_err_CAC=sqrt(diag(NW_VCOV))[2] # [2] retrieves the standard error of the CAC slope (element 1 is the intercept)
#Compare the standard Errors
cbind(reg1summary$coefficients[2,2],hac_err_CAC)
##                hac_err_CAC
## CAC 0.01833804  0.03779094
#Compute new t-stat
newCACt=(reg1$coefficients['CAC']-0)/hac_err_CAC
#Compare the t-stats
cbind(reg1summary$coefficients[2,3],newCACt)
##               newCACt
## CAC 28.09917 13.63512
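
Rather than rebuilding t-stats by hand, the same HAC correction can be applied to the full coefficient table with lmtest::coeftest; a minimal sketch using NW_VCOV from above:

library(lmtest)
coeftest(reg1, vcov = NW_VCOV)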

2.3 Equilibrium Pricing Models

2.3.1 CAPM

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
startd<-"2015-12-01"
endd<-"2020-12-31"
freq<-"monthly"
tickers_code <- c("MSFT","^GSPC","TB4WK") # GSPC=SP500; TB4WK=1mt Treasury Yield
getSymbols(tickers_code[1:2],from = startd, to =endd, periodicity = freq, src = 'yahoo')
## [1] "MSFT"  "^GSPC"
getSymbols(tickers_code[3],src = 'FRED')
## [1] "TB4WK"
### Data processing
tickers = gsub("[[:punct:]]", "", tickers_code)
# Prices
Price = do.call(merge, lapply(tickers[1:2], function(x) Ad(get(x))))
names(Price) = lapply(tickers[1:2], function(x) paste(x,".Price",sep=""))
# Returns
{Return = do.call(merge,lapply(Price, function(x) 
        periodReturn(x,period='monthly',type='log')))}
names(Return) = lapply(tickers[1:2], function(x) paste(x,".Return",sep=""))
# Risk free rate
Rf = TB4WK["2016-01-01/2020-12-31"] # this is an annual rate by default
Rf = (Rf/100+1)^(1/12)-1 # convert to a monthly rate
Rf = log(1+Rf) # convert to log
names(Rf) = "Rf"
Asset = do.call(merge,list(Price,Return,Rf))### merge data together
Asset = na.omit(Asset)#clean NA's
Asset$GSPC.ExcessReturn=Asset$GSPC.Return-Asset$Rf
Asset$MSFT.ExcessReturn=Asset$MSFT.Return-Asset$Rf
CAPMModel=lm(MSFT.ExcessReturn~GSPC.ExcessReturn,data=Asset) #run CAPM
print(summary(CAPMModel))
## 
## Call:
## lm(formula = MSFT.ExcessReturn ~ GSPC.ExcessReturn, data = Asset)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.120639 -0.023982  0.004723  0.021406  0.076649 
## 
## Coefficients:
##                   Estimate Std. Error t value Pr(>|t|)    
## (Intercept)       0.016359   0.005011   3.265  0.00184 ** 
## GSPC.ExcessReturn 0.802098   0.111760   7.177 1.48e-09 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.03798 on 58 degrees of freedom
## Multiple R-squared:  0.4704, Adjusted R-squared:  0.4612 
## F-statistic: 51.51 on 1 and 58 DF,  p-value: 1.478e-09

2.3.2 CAPM Many Assets

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)

startd<-"2015-12-01"
endd<-"2020-12-31"
freq<-"monthly"
tickers_code <- c("IBM","AAPL","GOOG","FB","MSFT","^GSPC","TB4WK") # GSPC=SP500; TB4WK=1mt Treasury Yield
getSymbols(tickers_code[1:6],from = startd, to =endd, periodicity = freq, src = 'yahoo')
## [1] "IBM"   "AAPL"  "GOOG"  "FB"    "MSFT"  "^GSPC"
getSymbols(tickers_code[7],src = 'FRED')
## [1] "TB4WK"
library(tidyverse)
# risk free rate
Rf = TB4WK["2016-01-01/2020-03-31"] # annual rate
Rf = (Rf/100+1)^(1/12)-1 # convert to month rate
Rf = log(1+Rf) # converting to log returns
names(Rf) = "Rf"
# market excess return
ExcessReturn.Market = data.frame(periodReturn(Ad(get("GSPC")),
                                period = "monthly",type='log')[-1,]-Rf)
# stocks' excess return
df <- tibble(Ticker = tickers_code[1:5],
             ExcessReturn.Stock = do.call(c,lapply(Ticker, function(x) 
               data.frame(periodReturn(Ad(get(x)),type='log')[-1,]-Rf))),
             ExcessReturn.Market = rep(ExcessReturn.Market,5),
             #Date = index(Rf)
             Date = do.call(c,lapply(Ticker, function(x) (list(index(Rf)))))
             )

library(plyr)
# convert to long table
df_long = df %>% unnest(colnames(df))
#Break up df_long by Ticker, then fit the specified model to each piece and return a list
models <- dlply(df_long, "Ticker", function(x) lm(ExcessReturn.Stock~1+ExcessReturn.Market, data = x))

# Apply coef to each model and return a data frame
coefs=ldply(models, coef)
names(coefs) = c("Ticker","Intercept","Beta")
print(coefs)
##   Ticker    Intercept      Beta
## 1   AAPL  0.013577110 1.1372823
## 2     FB  0.004428500 1.0303281
## 3   GOOG  0.003823518 0.9839200
## 4    IBM -0.006817775 1.4072015
## 5   MSFT  0.018216837 0.8067234

2.3.3 CAPM Rolling

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
startd<-"2015-12-01"
endd<-"2020-12-31"
freq<-"monthly"
tickers_code <- c("IBM","^GSPC","TB4WK") # GSPC=SP500; TB4WK=1mt Treasury Yield
getSymbols(tickers_code[1:2],from = startd, to =endd, periodicity = freq, src = 'yahoo')
## [1] "IBM"   "^GSPC"
getSymbols(tickers_code[3],src = 'FRED')
## [1] "TB4WK"
library(tidyverse)
# risk free rate
Rf = TB4WK["2016-01-01/2020-03-31"] # annual rate
Rf = (Rf/100+1)^(1/12)-1 # convert to month rate
Rf = log(1+Rf) # converting to log returns
names(Rf) = "Rf"
# market excess return
ExcessReturn.Market = data.frame(periodReturn(Ad(get("GSPC")),
                                period = "monthly",type='log')[-1,]-Rf)
# stocks' excess return
df <- tibble(Ticker = tickers_code[1],
             ExcessReturn.Stock = do.call(c,lapply(Ticker, function(x) 
               data.frame(periodReturn(Ad(get(x)),type='log')[-1,]-Rf))),
             ExcessReturn.Market = rep(ExcessReturn.Market,1),
             #Date = index(Rf)
             Date = do.call(c,lapply(Ticker, function(x) (list(index(Rf)))))
             )

library(plyr)
# convert to long table
df_long = df %>% unnest(colnames(df))
#Break up df_long by Ticker, then fit the specified model to each piece and return a list
models <- dlply(df_long, "Ticker", function(x) lm(ExcessReturn.Stock~1+ExcessReturn.Market, data = x))

# Apply coef to each model and return a data frame
coefs=ldply(models, coef)
names(coefs) = c("Ticker","Intercept","Beta")
library(rollRegres)
rollmodels <- dlply(df_long, "Ticker", function(x) roll_regres(ExcessReturn.Stock~1+ExcessReturn.Market,
                          x,width = 24L,
                          do_compute = c("sigmas", "r.squareds")))
# rolling coefficients
rollcoefs=ldply(rollmodels, function(x) x$coefs)
rollcoefs$Date = rep(index(Rf),1)
rollcoefs = na.omit(rollcoefs)
rollcoefs=rollcoefs[order(rollcoefs$Date,rollcoefs$Ticker),]
row.names(rollcoefs) =NULL
names(rollcoefs) = c("Ticker","Alpha","Beta","Date")
head(rollcoefs,10)
##    Ticker        Alpha     Beta       Date
## 1     IBM -0.012153559 1.804072 2017-12-01
## 2     IBM -0.012947065 1.776360 2018-01-01
## 3     IBM -0.013550886 1.716321 2018-02-01
## 4     IBM -0.008525031 1.187984 2018-03-01
## 5     IBM -0.009532415 1.207887 2018-04-01
## 6     IBM -0.012440408 1.145114 2018-05-01
## 7     IBM -0.012534230 1.148245 2018-06-01
## 8     IBM -0.012868612 1.097941 2018-07-01
## 9     IBM -0.013262751 1.087013 2018-08-01
## 10    IBM -0.011910720 1.071676 2018-09-01
colors = c("darkred")
linestypes = c("solid")
shapes = c(15)
ggplot(data = rollcoefs,
       aes(x=Date,y=Beta,group = Ticker,color = Ticker, lty = Ticker,shape = Ticker))+
  geom_point()+
  geom_line()+
  scale_x_date(date_breaks = "6 months" , date_labels = "%Y-%m")+
  scale_color_manual(values = alpha(colors,0.5)) +
  scale_linetype_manual(values = linestypes)+ 
  scale_shape_manual(values=shapes)+
  labs(x="Date", y="",title = "Rolling Beta (estimated window = 2 year)")

2.3.4 Security Market Line

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
startd<-"2015-12-01"
endd<-"2020-12-31"
freq<-"monthly"
tickers_code <- c("IBM","F","GE","CAT","MSFT","^GSPC","TB4WK") # GSPC=SP500; TB4WK=1mt Treasury Yield
getSymbols(tickers_code[1:6],from = startd, to =endd, periodicity = freq, src = 'yahoo')
## [1] "IBM"   "F"     "GE"    "CAT"   "MSFT"  "^GSPC"
getSymbols(tickers_code[7],src = 'FRED')
## [1] "TB4WK"
library(tidyverse)
# risk free rate
Rf = TB4WK["2016-01-01/2020-03-31"] # annual rate
Rf = (Rf/100+1)^(1/12)-1 # convert to month rate
Rf = log(1+Rf) # converting to log returns
names(Rf) = "Rf"
# market excess return
ExcessReturn.Market = data.frame(periodReturn(Ad(get("GSPC")),
                                period = "monthly",type='log')[-1,]-Rf)
# stocks' excess return
df <- tibble(Ticker = tickers_code[1:5],
             ExcessReturn.Stock = do.call(c,lapply(Ticker, function(x) 
               data.frame(periodReturn(Ad(get(x)),type='log')[-1,]-Rf))),
             ExcessReturn.Market = rep(ExcessReturn.Market,5),
             #Date = index(Rf)
             Date = do.call(c,lapply(Ticker, function(x) (list(index(Rf)))))
             )

library(plyr)
# convert to long table
df_long = df %>% unnest(colnames(df))
#Break up df_long by Ticker, then fit the specified model to each piece and return a list
models <- dlply(df_long, "Ticker", function(x) lm(ExcessReturn.Stock~1+ExcessReturn.Market, data = x))

# Apply coef to each model and return a data frame
coefs=ldply(models, coef)
names(coefs) = c("Ticker","Intercept","Beta")
### expected Rf and market return
Rf_avg = mean(Rf)
Mkt_avg = mean(ExcessReturn.Market$monthly.returns)+Rf_avg

### require return from CAPM
coefs$RequireReturn = Rf_avg + coefs$Beta*(Mkt_avg-Rf_avg)
coefs$ExpectedReturn = ddply(df_long,"Ticker",summarise,
                  Mean = mean(ExcessReturn.Stock,na.rm = TRUE))$Mean+Rf_avg
head(coefs)
##   Ticker    Intercept      Beta RequireReturn ExpectedReturn
## 1    CAT  0.007489770 1.2497798   0.005495939    0.012985709
## 2      F -0.022175322 1.5373035   0.006525285   -0.015650037
## 3     GE -0.029873269 1.3534187   0.005866970   -0.024006299
## 4    IBM -0.006817773 1.4072015   0.006059515   -0.000758258
## 5   MSFT  0.018216838 0.8067241   0.003909783    0.022126622
colors = c("darkred","steelblue","darkgreen","yellow4","darkblue")
linestypes = c("solid","longdash","twodash","dashed","dotdash")
shapes = c(15:19)
ggplot(coefs,aes(x = Beta,color = Ticker))+
  geom_abline(intercept = Rf_avg,slope = Mkt_avg-Rf_avg,color="grey",size = 2, alpha =0.6)+
  geom_point(aes(y=ExpectedReturn,color = Ticker),size = 3,shape=15)+
  geom_point(aes(y=RequireReturn,color = Ticker),size = 3)+
  geom_point(aes(x=0,y=Rf_avg),color = "darkgreen",size=5,shape = 2)+
  geom_point(aes(x=1,y=Mkt_avg),color = "darkgreen",size=5,shape = 2)+
  annotate(geom="text", x=1.08, y=Mkt_avg-0.001, label="M(Market portfolio)",
              color="darkgreen")+
  annotate(geom="text", x=0.1, y=Rf_avg, label="Risk-free Rate",
              color="darkgreen")+
  geom_segment(aes(x = 1, y = 0, xend = 1, yend = Mkt_avg),linetype="dashed")+
  geom_segment(aes(x = 0, y = Mkt_avg, xend = 1, yend = Mkt_avg),linetype="dashed")+
  scale_color_manual(values = alpha(colors,0.7)) +
  scale_fill_manual(values = alpha(colors,0.7))+
  scale_y_continuous(labels = scales::percent)+
  scale_x_continuous(expand = c(0.0001, 0, 0.1, 0.1))+
  labs(x="Beta", y="Return",title = "Security Market Line")+
  theme(panel.border = element_blank())

2.3.5 APT

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
# prepare library
library(quantmod)
library(tidyverse)
library(readr)
library(car)
library(lmtest)
library(AER)
library(lubridate)
library(xts)
### Download Dow Constituents price data from Yahoo Finance
# set parameters
start_date<-"2015-12-01"
end_date<-"2020-12-31"
freq<-"daily"
tickers_code <- c("AXP","AMGN","AAPL","BA","CAT","CSCO","CVX","GS","HD","HON","IBM","INTC","JNJ","KO","JPM","MCD","MMM","MRK","MSFT","NKE","PG","TRV","UNH","CRM","VZ","V","WBA","WMT","DIS")

# pull stock price from yahoo 
getSymbols(tickers_code,from=start_date,to=end_date,periodicity=freq,src='yahoo')
##  [1] "AXP"  "AMGN" "AAPL" "BA"   "CAT"  "CSCO" "CVX"  "GS"   "HD"   "HON" 
## [11] "IBM"  "INTC" "JNJ"  "KO"   "JPM"  "MCD"  "MMM"  "MRK"  "MSFT" "NKE" 
## [21] "PG"   "TRV"  "UNH"  "CRM"  "VZ"   "V"    "WBA"  "WMT"  "DIS"
### Load Factor Data
# download factor data from Ken French website via FTP
download.file(url = "https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Research_Data_Factors_CSV.zip", destfile = "F-F_Research_Data_Factors_CSV.zip",mode="wb")
# read the contents and extract the desired dates
Factors <- read_csv("F-F_Research_Data_Factors_CSV.zip", skip = 3, col_names = T) %>%
  na.omit()%>%
  dplyr::rename("Date" = "...1") %>%
  dplyr::mutate_all(as.numeric) %>%
  filter(Date > 196301)
# Format the Factor data frame
FFdate<-as.Date(paste0(as.character(Factors$Date), '01'), format='%Y%m%d')
# drop the Date column and convert the factor series from percent to log returns
FFdata<-log(select(Factors, -Date)/100+1)
FFxts<-xts(FFdata,order.by=FFdate)[paste(start_date, "/", end_date, sep = "")]
# print the first several rows
head(FFxts)
##                   Mkt-RF          SMB          HML         RF
## 2015-12-01 -0.0219389075 -0.028502360 -0.026241311 9.9995e-05
## 2016-01-01 -0.0594315838 -0.034487930  0.020390690 9.9995e-05
## 2016-02-01 -0.0007002451  0.008067371 -0.005716307 1.9998e-04
## 2016-03-01  0.0672847468  0.007571265  0.010939940 1.9998e-04
## 2016-04-01  0.0090588445  0.006677655  0.031595562 9.9995e-05
## 2016-05-01  0.0176434352 -0.001901807 -0.016739324 9.9995e-05
# Compute the excess return and join excess return with factor data
# use `Ad()` to get adjusted closed price
Price = do.call(merge, lapply(tickers_code, function(x) Ad(get(x))))
names(Price) = lapply(tickers_code, function(x) paste(x,".Price",sep=""))
# Extract the last days of each month
Price = Price[endpoints(Price, on='months')]
# Alter the date to match with other series
Price = xts(x = Price, order.by = floor_date(index(Price), "month")) %>%
  na.omit()

# Keep only the factor rows that align with the return dates (the first price row drops out when returns are computed)
FFxts<-FFxts[index(Price)[-1]]

# stocks' excess return
df <- tibble(Ticker = tickers_code,
             ExcessReturn.Stock = do.call(c,lapply(tickers_code, function(x)
               data.frame(periodReturn(Price[,paste(x,".Price",sep = "")], type='log')[-1,]-FFxts$RF))),
             Date = do.call(c,lapply(Ticker, function(x) (list(index(FFxts)))))
             )

# Tibble for Factor
FF_df <- tibble(Date = index(Price)[-1],
                as_tibble(FFxts)) %>%
  select(-RF)

# convert to long table and join with the factors
df_long = df %>%
  unnest(colnames(df)) %>%
  inner_join(FF_df)
head(df_long)
## # A tibble: 6 x 6
##   Ticker ExcessReturn.Stock Date        `Mkt-RF`      SMB      HML
##   <chr>               <dbl> <date>         <dbl>    <dbl>    <dbl>
## 1 AXP              -0.258   2016-01-01 -0.0594   -0.0345   0.0204 
## 2 AXP               0.0379  2016-02-01 -0.000700  0.00807 -0.00572
## 3 AXP               0.0994  2016-03-01  0.0673    0.00757  0.0109 
## 4 AXP               0.0683  2016-04-01  0.00906   0.00668  0.0316 
## 5 AXP               0.00493 2016-05-01  0.0176   -0.00190 -0.0167 
## 6 AXP              -0.0743  2016-06-01 -0.000500  0.00588 -0.0146
### First-pass Regression
# Apply FF3 regression using market excess return, SMB and HML as independent variables

# Create the IDs for stocks since we cannot use Tickers directly in group_by
Stock_IDs <- tibble(Ticker = unique(df_long$Ticker),
                 IDs = 1:length(unique(df_long$Ticker)))

# apply the full sample regression to calculate the full sample betas for all stocks
FF3_step1 <- df_long %>%
  # unite the Stock_IDs
  inner_join(Stock_IDs) %>%
  select(-c(Date, Ticker)) %>%
  group_by(IDs) %>%
  do(model = lm(ExcessReturn.Stock ~ `Mkt-RF` + SMB + HML, data = .) %>%
       coefficients())

# Unwrap the results
FF3_step1_result <- do.call(rbind.data.frame, FF3_step1$model) %>%
  as_tibble()
# Rename the variables
names(FF3_step1_result) = names(Factors)[-length(Factors)]
names(FF3_step1_result)[1] = "Alpha"

# Add in the Stock_IDs, then add in Ticker
FF3_step1_result <- Stock_IDs %>%
  dplyr::inner_join(cbind(FF3_step1[, 1], FF3_step1_result)) %>%
  select(-IDs)

head(FF3_step1_result)
## # A tibble: 6 x 5
##   Ticker    Alpha `Mkt-RF`      SMB    HML
##   <chr>     <dbl>    <dbl>    <dbl>  <dbl>
## 1 AXP    -0.00271    1.27   0.00820  0.331
## 2 AMGN   -0.00689    0.794 -0.0243  -0.573
## 3 AAPL    0.00565    1.42  -0.476   -0.706
## 4 BA     -0.00233    1.50   0.130    0.953
## 5 CAT     0.00865    0.824  0.436    0.115
## 6 CSCO   -0.00167    0.922 -0.0523  -0.115
### Second-pass Regression
# Re-run the FF3 using betas as independent variables
# compute mean returns
Second_reg_data <- df_long %>%
  # group by ticker then do calculation
  dplyr::group_by(Ticker) %>%
  # use summarise to calculate the mean excess return
  dplyr::summarise(MeanEXRet = mean(ExcessReturn.Stock)) %>%
  # join with beta estimates
  dplyr::inner_join(FF3_step1_result) %>%
  dplyr::select(-Alpha)

# run FF3 regression using betas as independent variables
FF3_step2 <- lm(MeanEXRet ~ `Mkt-RF` + SMB + HML, data = Second_reg_data)
summary(FF3_step2)
## 
## Call:
## lm(formula = MeanEXRet ~ `Mkt-RF` + SMB + HML, data = Second_reg_data)
## 
## Residuals:
##        Min         1Q     Median         3Q        Max 
## -0.0166380 -0.0027617 -0.0003736  0.0035955  0.0109143 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)  
## (Intercept)  0.002267   0.004161   0.545   0.5907  
## `Mkt-RF`     0.008546   0.004161   2.054   0.0506 .
## SMB         -0.003547   0.003924  -0.904   0.3746  
## HML         -0.007221   0.002881  -2.507   0.0191 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.006452 on 25 degrees of freedom
## Multiple R-squared:  0.3173, Adjusted R-squared:  0.2354 
## F-statistic: 3.874 on 3 and 25 DF,  p-value: 0.02104
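
For reference, the two-pass procedure first estimates, for each stock \(i\), the time-series regression

\[R_{i,t}^{e} = \alpha_i + \beta_{i,Mkt}(Mkt\text{-}RF)_t + \beta_{i,SMB}SMB_t + \beta_{i,HML}HML_t + \epsilon_{i,t}\]

and then runs the cross-sectional regression of mean excess returns on the estimated betas,

\[\bar{R}_{i}^{e} = \lambda_0 + \lambda_{Mkt}\hat{\beta}_{i,Mkt} + \lambda_{SMB}\hat{\beta}_{i,SMB} + \lambda_{HML}\hat{\beta}_{i,HML} + \eta_i\]

where the \(\lambda\)'s are the factor risk premia. In the output above, the HML premium is significantly negative over this sample, while the market premium is marginally positive.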

2.4 Time Series Models

2.4.1 ACF

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01" #We want from Jan1990 forward. 
endd<-"2018-01-01"
freq<-"quarterly"
GDP = Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
acf(GDP)

2.4.2 PACF

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01" #We want from Jan1990 forward. 
endd<-"2018-01-01"
freq<-"quarterly"
GDP = Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
pacf(GDP)

2.4.3 KPSS Test Stationarity

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP = Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)


library(tseries)
# KPSS test for trend stationary
kpss.test(GDP, null="Trend")
## 
##  KPSS Test for Trend Stationarity
## 
## data:  GDP
## KPSS Trend = 0.37964, Truncation lag parameter = 4, p-value = 0.01
# H0: trend stationarity
# Interpret: Low p-value --> reject the null hypothesis of trend stationarity

2.4.4 ADF Test Unit Root

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP = Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)

# ADF Test for unit root, which is a common type of non-stationary
library(tseries)
adf.test(GDP, alternative = "stationary") 
## 
##  Augmented Dickey-Fuller Test
## 
## data:  GDP
## Dickey-Fuller = -1.9904, Lag order = 4, p-value = 0.5806
## alternative hypothesis: stationary
# H0: Not stationary
# Interpret: High p-value --> fail to reject the null hypothesis of non-stationarity

2.4.5 Fix NonStationary w/Diff.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP = Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
colnames(GDP)[1]="Level" # Add column name 

# Use ADF test to check if it is non-stationary 
library(tseries)
adf.test(GDP$Level)
## 
##  Augmented Dickey-Fuller Test
## 
## data:  GDP$Level
## Dickey-Fuller = -1.9904, Lag order = 4, p-value = 0.5806
## alternative hypothesis: stationary
# Interpret: High p-value --> fail to reject the null hypothesis of non-stationarity. This series is non-stationary.

In order to get this into a commonly used format, we will first take logs, noting that a first difference of logs approximates a % change.
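
To see why, note that

\[\Delta \log(x_t) = \log(x_t) - \log(x_{t-1}) = \log\left(\tfrac{x_t}{x_{t-1}}\right) \approx \tfrac{x_t - x_{t-1}}{x_{t-1}}\]

with the approximation accurate when period-to-period changes are small.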

GDP$LogLevel<- log(GDP$Level) 
GDP$FirstDifLog<- diff(GDP$LogLevel, differences = 1)
GDP<-na.omit(GDP) # get rid of the NAs

# Now use the ADF test again to check if it is still non-stationary 
adf.test(GDP$FirstDifLog)
## 
##  Augmented Dickey-Fuller Test
## 
## data:  GDP$FirstDifLog
## Dickey-Fuller = -4.2675, Lag order = 4, p-value = 0.01
## alternative hypothesis: stationary
# Interpret: Low p-value --> reject the null hypothesis of non-stationarity
# Our data appears to be stationary after differencing

2.4.6 Cointegration

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)

startd <-"2013-01-01"  
endd <-"2013-12-31"
SPY <- Ad(getSymbols("SPY", from = startd, to = endd, auto.assign = FALSE)) # get the adjusted close price
IVV <- Ad(getSymbols("IVV", from = startd, to = endd, auto.assign = FALSE)) # get the adjusted close price

 
### Step 1: Finding the integration order of each series
library(tseries)
kpss.test(SPY, null="Trend") # Low p-value --> reject H0 -->Non stationary. 
## 
##  KPSS Test for Trend Stationarity
## 
## data:  SPY
## KPSS Trend = 0.20051, Truncation lag parameter = 5, p-value = 0.01581
kpss.test(IVV, null="Trend") # Low p-value --> reject H0 -->Non stationary. 
## 
##  KPSS Test for Trend Stationarity
## 
## data:  IVV
## KPSS Trend = 0.19944, Truncation lag parameter = 5, p-value = 0.01621
# Take first difference
SPY_d1<-diff(SPY,differences = 1)
IVV_d1<-diff(IVV,differences = 1)
# ADF test for unit root
adf.test(na.omit(SPY_d1)) # low p-value --> reject H0 -->SPY_d1 is stationary --> SPY is I(1).
## 
##  Augmented Dickey-Fuller Test
## 
## data:  na.omit(SPY_d1)
## Dickey-Fuller = -5.742, Lag order = 6, p-value = 0.01
## alternative hypothesis: stationary
adf.test(na.omit(IVV_d1)) # low p-value --> reject H0 -->IVV_d1 is stationary --> IVV is I(1).
## 
##  Augmented Dickey-Fuller Test
## 
## data:  na.omit(IVV_d1)
## Dickey-Fuller = -5.711, Lag order = 6, p-value = 0.01
## alternative hypothesis: stationary
#Interpret: Series Y_t(IVV) and X_t(SPY) have the same integration order (1)


### Step 2: Estimate cointegration coefficient and get residuals
fit<-lm(IVV~SPY)
res<-fit$residuals

### Step 3: Do ADF test for unit root
adf.test(res) # Low p-value --> reject H0--> Residuals are stationary. 
## 
##  Augmented Dickey-Fuller Test
## 
## data:  res
## Dickey-Fuller = -6.0563, Lag order = 6, p-value = 0.01
## alternative hypothesis: stationary
#Interpret: Y_t(IVV) and X_t(SPY) are co-integrated.
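
As a cross-check, the tseries package (already loaded above) also provides the Phillips-Ouliaris test, which is designed for residual-based cointegration testing; the plain ADF critical values used above are only approximate when applied to estimated residuals. A minimal sketch reusing the series from this section:

# Phillips-Ouliaris cointegration test; H0: the series are NOT cointegrated
# (the dependent series, IVV, goes in the first column)
po.test(cbind(as.numeric(IVV), as.numeric(SPY)))
# Interpret: a low p-value rejects the null of no cointegration,
# consistent with the Engle-Granger steps above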

2.4.7 ARIMA

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP <- Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
GDPGrowth <- diff(log(GDP)) # calculate log growth rate
GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs


# Specify the ARIMA model with your choice of p, d, q values (p = AR order, d = differencing, q = MA order)
AR1 <- arima(GDPGrowth, order = c(1, 0, 0)) # fit an AR(1) model 
AR1 # print the model 
## 
## Call:
## arima(x = GDPGrowth, order = c(1, 0, 0))
## 
## Coefficients:
##          ar1  intercept
##       0.4050      6e-03
## s.e.  0.0858      9e-04
## 
## sigma^2 estimated as 2.917e-05:  log likelihood = 425.77,  aic = -845.54
MA1 <- arima(GDPGrowth, order = c(0, 0, 1)) # fit an MA(1) model 
MA1 # print the model 
## 
## Call:
## arima(x = GDPGrowth, order = c(0, 0, 1))
## 
## Coefficients:
##          ma1  intercept
##       0.2868      6e-03
## s.e.  0.0733      7e-04
## 
## sigma^2 estimated as 3.097e-05:  log likelihood = 422.46,  aic = -838.91
AR1MA1 <- arima(GDPGrowth, order = c(1, 0, 1)) # fit an ARMA(1,1) model 
AR1MA1 # print the model 
## 
## Call:
## arima(x = GDPGrowth, order = c(1, 0, 1))
## 
## Coefficients:
##          ar1      ma1  intercept
##       0.7001  -0.3553     0.0060
## s.e.  0.1368   0.1750     0.0011
## 
## sigma^2 estimated as 2.828e-05:  log likelihood = 427.46,  aic = -846.92

2.4.8 ARIMA-Auto Lag Select

We can use the auto.arima() function from the forecast package to ask R to return the best ARIMA model according to either AIC, AICc or BIC value.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP <- Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
GDPGrowth <- diff(log(GDP)) # calculate log growth rate
GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs

library(forecast)
AR <- auto.arima(GDPGrowth, max.q = 0) # set the maximum value of MA order q=0 to auto fit an AR model
AR # print the model 
## Series: GDPGrowth 
## ARIMA(2,0,0) with non-zero mean 
## 
## Coefficients:
##          ar1     ar2   mean
##       0.3292  0.1861  0.006
## s.e.  0.0921  0.0924  0.001
## 
## sigma^2 = 2.89e-05:  log likelihood = 427.76
## AIC=-847.52   AICc=-847.15   BIC=-836.65
MA <- auto.arima(GDPGrowth, max.p = 0) # set the maximum value of AR order p=0 to auto fit an MA model
MA # print the model 
## Series: GDPGrowth 
## ARIMA(0,0,3) with non-zero mean 
## 
## Coefficients:
##          ma1     ma2     ma3    mean
##       0.3227  0.2925  0.1340  0.0061
## s.e.  0.0921  0.0957  0.0816  0.0009
## 
## sigma^2 = 2.937e-05:  log likelihood = 427.39
## AIC=-844.78   AICc=-844.21   BIC=-831.18
ARMA <- auto.arima(GDPGrowth) # fit an ARMA model(the fitted model might be AR, MA or ARIMA)
ARMA # print the model 
## Series: GDPGrowth 
## ARIMA(2,0,0) with non-zero mean 
## 
## Coefficients:
##          ar1     ar2   mean
##       0.3292  0.1861  0.006
## s.e.  0.0921  0.0924  0.001
## 
## sigma^2 = 2.89e-05:  log likelihood = 427.76
## AIC=-847.52   AICc=-847.15   BIC=-836.65

2.4.9 SARIMA

A seasonal autoregressive integrated moving average (SARIMA) model extends the ARIMA model to capture recurring seasonal patterns in the data.
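
In backshift-operator notation, a SARIMA\((p,d,q)\times(P,D,Q)_s\) model can be written as

\[\phi(B)\,\Phi(B^s)\,(1-B)^d\,(1-B^s)^D\,y_t = \theta(B)\,\Theta(B^s)\,\varepsilon_t\]

where \(\phi\) and \(\theta\) are the non-seasonal AR and MA polynomials, \(\Phi\) and \(\Theta\) are their seasonal counterparts, and \(s\) is the seasonal period (e.g. \(s=12\) for monthly data).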

2.4.9.1 Fit SARIMA Giving Model Orders

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"2000-01-01"
endd<-"2018-01-01"
ticker <- "FRED/HOUSTNSA" #New Privately-Owned Housing Units Started: Total Units
HSNG <- Quandl(ticker, type="ts",start_date=startd, end_date=endd)
{plot(HSNG) 
abline(v = ts(c(2000,2005,2010,2015)),col = "red") # v is for vertical lines
abline(v = ts(c(2001,2002,2003,2004)), col = "blue", lty = 2)}

# Evidence of seasonality

### Decide the order of SARIMA
library(astsa)
acf2(HSNG, 48) # Interpret: The slow decay shown in the ACF is a sign that differencing may be required

##      [,1]  [,2]  [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10] [,11] [,12] [,13]
## ACF  0.96  0.92  0.87 0.82 0.79 0.77 0.77 0.78 0.81  0.85  0.88  0.88  0.85
## PACF 0.96 -0.06 -0.12 0.06 0.15 0.15 0.13 0.21 0.26  0.22  0.08 -0.12 -0.32
##      [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24] [,25]
## ACF    0.8  0.74  0.69  0.65  0.62  0.61  0.61  0.63  0.65  0.67  0.67  0.64
## PACF  -0.4 -0.22 -0.05  0.14  0.00  0.05 -0.21  0.01 -0.02 -0.05  0.00 -0.03
##      [,26] [,27] [,28] [,29] [,30] [,31] [,32] [,33] [,34] [,35] [,36] [,37]
## ACF   0.58  0.52  0.46  0.42  0.39  0.37  0.36  0.37  0.38  0.39  0.39  0.35
## PACF -0.02 -0.08 -0.07  0.08 -0.05 -0.06 -0.05  0.03 -0.10  0.00  0.07 -0.04
##      [,38] [,39] [,40] [,41] [,42] [,43] [,44] [,45] [,46] [,47] [,48]
## ACF   0.30  0.24  0.19  0.15  0.12  0.10  0.10  0.10  0.12  0.13  0.12
## PACF  0.01  0.07  0.05  0.03  0.02  0.03 -0.04  0.02  0.09  0.01  0.01
acf2(diff(HSNG), 48) # Interpret: Even with one order of differencing, we still observe slow decay in the ACF at the seasonal lags (multiples of 12). This suggests that a seasonal difference should also be applied.

##      [,1] [,2]  [,3]  [,4]  [,5]  [,6]  [,7]  [,8]  [,9] [,10] [,11] [,12]
## ACF  0.03 0.11 -0.07 -0.17 -0.18 -0.17 -0.21 -0.19 -0.09  0.08  0.28  0.41
## PACF 0.03 0.11 -0.08 -0.18 -0.17 -0.14 -0.21 -0.27 -0.22 -0.08  0.12  0.32
##      [,13] [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24]
## ACF   0.35  0.06 -0.07 -0.22 -0.10 -0.22 -0.07 -0.31 -0.06  0.10  0.24  0.36
## PACF  0.40  0.20  0.03 -0.18 -0.03 -0.06  0.20 -0.02  0.01  0.04 -0.01  0.00
##      [,25] [,26] [,27] [,28] [,29] [,30] [,31] [,32] [,33] [,34] [,35] [,36]
## ACF   0.28  0.12 -0.08 -0.22 -0.10 -0.14 -0.17 -0.20 -0.06  0.03   0.2  0.40
## PACF -0.01  0.05  0.04 -0.10  0.03  0.04  0.03 -0.07  0.08 -0.04  -0.1  0.01
##      [,37] [,38] [,39] [,40] [,41] [,42] [,43] [,44] [,45] [,46] [,47] [,48]
## ACF   0.19  0.09 -0.08 -0.15 -0.15 -0.13 -0.12 -0.18 -0.12  0.09  0.17  0.35
## PACF -0.03 -0.09 -0.07 -0.05 -0.04 -0.04  0.02 -0.03 -0.09 -0.01 -0.02  0.04
acf2(diff(diff(HSNG),12),48)

##       [,1]  [,2]  [,3] [,4]  [,5] [,6]  [,7]  [,8]  [,9] [,10] [,11] [,12]
## ACF  -0.53  0.06  0.02 0.08 -0.06 0.06 -0.12  0.09 -0.04 -0.02  0.24 -0.48
## PACF -0.53 -0.32 -0.17 0.05  0.07 0.13 -0.05 -0.05 -0.07 -0.10  0.33 -0.28
##      [,13] [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24]
## ACF   0.34 -0.09 -0.01 -0.06  0.09 -0.11  0.19 -0.18  0.04  0.07  0.00 -0.06
## PACF -0.05 -0.08 -0.07 -0.01  0.02  0.00  0.10  0.02 -0.11  0.00  0.25 -0.18
##      [,25] [,26] [,27] [,28] [,29] [,30] [,31] [,32] [,33] [,34] [,35] [,36]
## ACF   0.02  0.06  0.01 -0.05  0.02  0.07 -0.14  0.09  0.05 -0.11  0.00  0.08
## PACF  0.02  0.08  0.08  0.01  0.05  0.04 -0.03 -0.07  0.05  0.04  0.12 -0.14
##      [,37] [,38] [,39] [,40] [,41] [,42] [,43] [,44] [,45] [,46] [,47] [,48]
## ACF  -0.08  0.02 -0.06  0.04 -0.04 -0.02  0.03  0.00 -0.05  0.06 -0.06  0.03
## PACF -0.05  0.01 -0.08 -0.10 -0.06 -0.05 -0.05 -0.08  0.00 -0.01 -0.02 -0.11
# Interpret: 
# Seasonal Order: From the seasonal lag perspective, we can see that the ACF cuts off at the 2nd seasonal lag, while the PACF appears to tail off. This would suggest a SARMA model of (0,2). 
# ARMA Order: Within the first seasonal cycle, it can be seen that the ACF appears to be cutting off at lag = 1, while PACF appears to be cutting off at lag = 3. 
# Thus a proposed model is ARMA(3,1) x Seasonal(0,2) (lag = 12) for the differenced time series. 


### Fit SARIMA Model
arima(HSNG, order = c(3,1,1), seasonal = list(order = c(0,1,2), period = 12))
## 
## Call:
## arima(x = HSNG, order = c(3, 1, 1), seasonal = list(order = c(0, 1, 2), period = 12))
## 
## Coefficients:
##          ar1     ar2     ar3      ma1     sma1     sma2
##       0.3734  0.3349  0.2163  -0.9246  -0.6783  -0.0807
## s.e.  0.0870  0.0733  0.0704   0.0493   0.0784   0.0782
## 
## sigma^2 estimated as 74.76:  log likelihood = -734.27,  aic = 1482.55
library(astsa) # sarima() and sarima.for() come from the astsa package (loaded earlier)
sarima(HSNG, p=3, d=1, q=1, P=0, D=1, Q=2, S=12) # sarima() takes in arguments in the following order: data, ARIMA inputs (p,d,q), SARIMA inputs (P,D,Q), and seasonal lag S 
## initial  value 2.564457 
## iter   2 value 2.298184
## iter   3 value 2.195428
## iter   4 value 2.189850
## iter   5 value 2.183666
## iter   6 value 2.183389
## iter   7 value 2.183263
## iter   8 value 2.183249
## iter   9 value 2.183238
## iter  10 value 2.183220
## iter  11 value 2.183147
## iter  12 value 2.182860
## iter  13 value 2.182738
## iter  14 value 2.182723
## iter  15 value 2.181411
## iter  16 value 2.180273
## iter  17 value 2.180018
## iter  18 value 2.179797
## iter  19 value 2.179795
## iter  20 value 2.179794
## iter  21 value 2.179794
## iter  22 value 2.179794
## iter  23 value 2.179793
## iter  24 value 2.179793
## iter  24 value 2.179793
## iter  24 value 2.179793
## final  value 2.179793 
## converged
## initial  value 2.184982 
## iter   2 value 2.184962
## iter   3 value 2.184622
## iter   4 value 2.184601
## iter   5 value 2.184203
## iter   6 value 2.183855
## iter   7 value 2.183550
## iter   8 value 2.181614
## iter   9 value 2.180869
## iter  10 value 2.180682
## iter  11 value 2.180520
## iter  12 value 2.180464
## iter  13 value 2.180444
## iter  14 value 2.180441
## iter  15 value 2.180441
## iter  16 value 2.180440
## iter  17 value 2.180440
## iter  17 value 2.180440
## iter  17 value 2.180440
## final  value 2.180440 
## converged

## $fit
## 
## Call:
## arima(x = xdata, order = c(p, d, q), seasonal = list(order = c(P, D, Q), period = S), 
##     include.mean = !no.constant, transform.pars = trans, fixed = fixed, optim.control = list(trace = trc, 
##         REPORT = 1, reltol = tol))
## 
## Coefficients:
##          ar1     ar2     ar3      ma1     sma1     sma2
##       0.3734  0.3349  0.2163  -0.9246  -0.6783  -0.0807
## s.e.  0.0870  0.0733  0.0704   0.0493   0.0784   0.0782
## 
## sigma^2 estimated as 74.76:  log likelihood = -734.27,  aic = 1482.55
## 
## $degrees_of_freedom
## [1] 198
## 
## $ttable
##      Estimate     SE  t.value p.value
## ar1    0.3734 0.0870   4.2919  0.0000
## ar2    0.3349 0.0733   4.5670  0.0000
## ar3    0.2163 0.0704   3.0738  0.0024
## ma1   -0.9246 0.0493 -18.7387  0.0000
## sma1  -0.6783 0.0784  -8.6496  0.0000
## sma2  -0.0807 0.0782  -1.0320  0.3034
## 
## $AIC
## [1] 7.267385
## 
## $AICc
## [1] 7.269475
## 
## $BIC
## [1] 7.381242
### Forecast
sarima.for(HSNG, n.ahead = 20, p=3, d=1, q=1, P=0, D=1, Q=2, S=12) # forecast prediction for next 20 time points

## $pred
##            Jan       Feb       Mar       Apr       May       Jun       Jul
## 2018            89.94963 102.27485 114.10191 114.38431 119.99422 118.98789
## 2019  93.38826  95.01798 107.50834 118.91902 119.63144 124.58176 123.90903
##            Aug       Sep       Oct       Nov       Dec
## 2018 110.90079 112.45802 113.53181 101.75552  91.62767
## 2019 115.99304 117.36446                              
## 
## $se
##            Jan       Feb       Mar       Apr       May       Jun       Jul
## 2018            8.646530  9.477341 10.713797 12.129696 13.232275 14.373052
## 2019 20.580317 22.526881 23.813413 25.176267 26.565957 27.872726 29.174658
##            Aug       Sep       Oct       Nov       Dec
## 2018 15.479904 16.538306 17.580943 18.599896 19.598314
## 2019 30.455187 31.710782

2.4.9.2 Fit SARIMA Without Giving Model Orders

We can use the auto.arima() function from the forecast package and set seasonal = TRUE to ask R to return the best ARIMA/SARIMA model according to either AIC, AICc or BIC value.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"2000-01-01"
endd<-"2018-01-01"
ticker <- "FRED/HOUSTNSA" #New Privately-Owned Housing Units Started: Total Units
HSNG <- Quandl(ticker, type="ts",start_date=startd, end_date=endd)
{plot(HSNG) 
abline(v = ts(c(2000,2005,2010,2015)),col = "red") # v is for vertical lines
abline(v = ts(c(2001,2002,2003,2004)), col = "blue", lty = 2)}

# Evidence of seasonality


### Fit SARIMA Model
library(forecast)
auto.arima(HSNG, seasonal = TRUE)
## Series: HSNG 
## ARIMA(1,1,1)(0,1,2)[12] 
## 
## Coefficients:
##           ar1      ma1     sma1     sma2
##       -0.2223  -0.3329  -0.6617  -0.1001
## s.e.   0.1168   0.1079   0.0783   0.0768
## 
## sigma^2 = 77.14:  log likelihood = -735.63
## AIC=1481.27   AICc=1481.57   BIC=1497.86
# We have a model with ARIMA order (p=1,d=1,q=1), seasonal order (P=0,D=1,Q=2), and seasonal lag S=12

2.4.10 Examine Model Residual

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP <- Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
GDPGrowth <- diff(log(GDP)) # calculate log growth rate
GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs

library(forecast)
fit <- auto.arima(GDPGrowth)
fit
## Series: GDPGrowth 
## ARIMA(2,0,0) with non-zero mean 
## 
## Coefficients:
##          ar1     ar2   mean
##       0.3292  0.1861  0.006
## s.e.  0.0921  0.0924  0.001
## 
## sigma^2 = 2.89e-05:  log likelihood = 427.76
## AIC=-847.52   AICc=-847.15   BIC=-836.65
### Ensure the errors are white noise.
# Ideally, residuals should look like white noise: mean zero, constant
# variance, and no autocorrelation (and, ideally, roughly normal).
# We will use tsdisplay to check the residuals for our optimal model. 
tsdisplay(residuals(fit), lag.max=45, main='Model Residuals')

# Interpret: The ACF and PACF indicate little to no persistence, and the time series plot shows little remaining pattern in the data.
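
As an alternative, recent versions of the forecast package bundle the same diagnostics (time plot, ACF, histogram) with a Ljung-Box test in a single call:

checkresiduals(fit) # residual plots plus an automatic Ljung-Box test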

2.4.11 Ljung–Box-Residual Indep.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP <- Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
GDPGrowth <- diff(log(GDP)) # calculate log growth rate
GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs

library(forecast)
fit <- auto.arima(GDPGrowth)
fit
## Series: GDPGrowth 
## ARIMA(2,0,0) with non-zero mean 
## 
## Coefficients:
##          ar1     ar2   mean
##       0.3292  0.1861  0.006
## s.e.  0.0921  0.0924  0.001
## 
## sigma^2 = 2.89e-05:  log likelihood = 427.76
## AIC=-847.52   AICc=-847.15   BIC=-836.65
### Compute the Ljung–Box test statistic for examining the null hypothesis of independence in a given time series. 
Box.test(residuals(fit),lag=4,type="Ljung")
## 
##  Box-Ljung test
## 
## data:  residuals(fit)
## X-squared = 0.0055806, df = 4, p-value = 1
# Ho: No autocorrelation
#Interpret: Given the high p-value, we fail to reject the null of no persistence through 4 lags. 
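
One refinement: when the residuals come from a fitted ARMA model, the Ljung-Box degrees of freedom should be reduced by the number of estimated ARMA coefficients. Box.test() supports this via its fitdf argument; a sketch for the ARIMA(2,0,0) fit above (the lag must exceed fitdf):

Box.test(residuals(fit), lag=8, type="Ljung", fitdf=2) # df = 8 - 2 = 6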

2.4.12 Kolmogorov-Smirnov Test

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP <- Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
GDPGrowth <- diff(log(GDP)) # calculate log growth rate
GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs

library(forecast)
fit <- auto.arima(GDPGrowth)
fit
## Series: GDPGrowth 
## ARIMA(2,0,0) with non-zero mean 
## 
## Coefficients:
##          ar1     ar2   mean
##       0.3292  0.1861  0.006
## s.e.  0.0921  0.0924  0.001
## 
## sigma^2 = 2.89e-05:  log likelihood = 427.76
## AIC=-847.52   AICc=-847.15   BIC=-836.65
### Let's also test the null of normal distribution via a KS test
ks.test(residuals(fit),pnorm)
## 
##  One-sample Kolmogorov-Smirnov test
## 
## data:  residuals(fit)
## D = 0.49566, p-value < 2.2e-16
## alternative hypothesis: two-sided
#Interpret: Given the low p-value, we reject the null. Note, however, that this KS test compares the residuals to a standard normal; because the residuals have a tiny standard deviation, the rejection largely reflects scale (see the sketch below).
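
To test shape rather than scale, standardize the residuals first; a sketch, with the Jarque-Bera test from the tseries package as a cross-check:

z <- as.numeric(scale(residuals(fit))) # subtract the mean, divide by the sd
ks.test(z, pnorm)                      # now a test of shape, not scale
library(tseries)
jarque.bera.test(residuals(fit))       # H0: normality (skewness/kurtosis based)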

2.4.13 Quantile-Quantile Plot

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP <- Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
GDPGrowth <- diff(log(GDP)) # calculate log growth rate
GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs

library(forecast)
fit <- auto.arima(GDPGrowth)
fit
## Series: GDPGrowth 
## ARIMA(2,0,0) with non-zero mean 
## 
## Coefficients:
##          ar1     ar2   mean
##       0.3292  0.1861  0.006
## s.e.  0.0921  0.0924  0.001
## 
## sigma^2 = 2.89e-05:  log likelihood = 427.76
## AIC=-847.52   AICc=-847.15   BIC=-836.65
### Let's see if the residuals look like a normal distribution with a qq plot
qqnorm(residuals(fit)); 
qqline(residuals(fit))

2.4.14 Model Comparison

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP <- Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
GDPGrowth <- diff(log(GDP)) # calculate log growth rate
GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs
library(forecast)
Model1<-Arima(GDPGrowth, order = c(1, 0, 0)) # fit an AR(1) model 
Model1
## Series: GDPGrowth 
## ARIMA(1,0,0) with non-zero mean 
## 
## Coefficients:
##          ar1   mean
##       0.4050  6e-03
## s.e.  0.0858  9e-04
## 
## sigma^2 = 2.97e-05:  log likelihood = 425.77
## AIC=-845.54   AICc=-845.32   BIC=-837.39
Model2<-Arima(GDPGrowth, order = c(0, 0, 3)) # fit an MA(3) model 
Model2
## Series: GDPGrowth 
## ARIMA(0,0,3) with non-zero mean 
## 
## Coefficients:
##          ma1     ma2     ma3    mean
##       0.3227  0.2925  0.1340  0.0061
## s.e.  0.0921  0.0957  0.0816  0.0009
## 
## sigma^2 = 2.937e-05:  log likelihood = 427.39
## AIC=-844.78   AICc=-844.21   BIC=-831.18

Notice that the AIC of Model1 is -845.54, which is lower than Model2’s AIC of -844.78. Since a lower AIC indicates a better trade-off between fit and complexity, Model1 is the preferred model by this criterion.
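
To compare models programmatically, the generic AIC() and BIC() extractors accept several fitted models at once:

AIC(Model1, Model2) # lower value = preferred model
BIC(Model1, Model2) # BIC penalizes extra parameters more heavily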

2.4.15 Test for ARCH Effect

An ARCH effect is serial correlation in the conditional variance of a series: volatility clusters, so squared shocks are autocorrelated even when the shocks themselves are not.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"2010-01-01" #We want from Jan2010 forward. 
endd<-"2018-01-01"
freq<-"weekly"
trans<-"rdiff" # calculate simple return
ticker <- "WIKI/JPM.11" # we load the JPM closed price data (the 11th column)

JPM <- Quandl(ticker,transform=trans,start_date=startd, end_date=endd, collapse=freq,type="xts") # Careful here.  The `rdiff` provides the simple return.  We should convert to log returns since we are running a regression. 
colnames(JPM )[1]="SimpleRet" # Add column name 
JPM$LogRet<-log(1+JPM $SimpleRet)

The ARCH model assumes that the conditional mean of the error term in a time series model is constant (zero), but its conditional variance is not.
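
Formally, an ARCH(1) process for the de-meaned return \(\varepsilon_t\) can be written as

\[\varepsilon_t = \sigma_t z_t, \quad z_t \sim iid(0,1), \quad \sigma_t^2 = \omega + \alpha_1 \varepsilon_{t-1}^2\]

so today's conditional variance rises with yesterday's squared shock. The LM test below amounts to testing whether \(\alpha_1\) (and any higher-order lags) are jointly zero.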

### Get Residual Square
library(dplyr)
reg<-lm(JPM$LogRet~1) # de-mean the return series by regressing it on a constant only
JPM$DMLogRet<-resid(reg) # get the residuals, i.e. the de-meaned log returns
JPM$Sq_DMLogRet<-JPM$DMLogRet^2 # square the de-meaned log returns (the residuals)
acf(JPM$Sq_DMLogRet) # use the ACF to see if there appears to be persistence in the squared residuals

### Engle's ARCH LM test
# Engle's ARCH LM test is the most commonly applied standard test to detect autoregressive conditional heteroscedasticity. We start with the regression of squared residuals upon lagged squared residuals 
ARCH <- lm(JPM$Sq_DMLogRet~lag.xts(JPM$Sq_DMLogRet,k=1))
summary(ARCH)
## 
## Call:
## lm(formula = JPM$Sq_DMLogRet ~ lag.xts(JPM$Sq_DMLogRet, k = 1))
## 
## Residuals:
##        Min         1Q     Median         3Q        Max 
## -0.0031486 -0.0009597 -0.0007224  0.0001594  0.0143211 
## 
## Coefficients:
##                                  Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                     0.0009616  0.0001200   8.012 1.17e-14 ***
## lag.xts(JPM$Sq_DMLogRet, k = 1) 0.1558554  0.0486171   3.206  0.00145 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.002168 on 413 degrees of freedom
##   (1 observation deleted due to missingness)
## Multiple R-squared:  0.02428,    Adjusted R-squared:  0.02192 
## F-statistic: 10.28 on 1 and 413 DF,  p-value: 0.001452
# Calculate the LM test statistic
library(broom)
RSq_ARCH <- glance(ARCH)[[1]] # grab the R square value of ARCH model
L <- length(JPM$Sq_DMLogRet) # lengths for data
q <- length(coef(ARCH))-1 # degrees of freedom q
LM <- (L-q)*RSq_ARCH # Compute the LM stat as (L-q)*Rsquare
LM
## [1] 10.07604
# Calculate the critical value
alpha <- 0.05 # set significance levels
Critical <- qchisq(1-alpha, q) # get chi-squared stat(the arch test stat is a chi-squared)
Critical
## [1] 3.841459
# Interpret: Null hypothesis is white noise (i.e. no ARCH effects).  Test stat is greater than critical value, so we reject, implying ARCH effects exist.  
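
For convenience, the FinTS package (assuming it is installed) wraps the same LM procedure in a single call; a sketch applied to the de-meaned returns above:

library(FinTS)
ArchTest(as.numeric(JPM$DMLogRet), lags=1) # H0: no ARCH effects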

2.4.16 GARCH

2.4.16.1 GARCH Model

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"2010-01-01" #We want from Jan2010 forward. 
endd<-"2018-01-01"
freq<-"weekly"
trans<-"rdiff" # calculate simple return
ticker <- "WIKI/JPM.11" # we load the JPM closed price data (the 11th column)

JPM <- Quandl(ticker,transform=trans,start_date=startd, end_date=endd, collapse=freq,type="xts")
colnames(JPM )[1]="SimpleRet" # Add column name 
# Careful here.  The `rdiff` provides the simple return.  We should convert to log returns since we are running a regression.   
JPM$LogRet<-log(1+JPM $SimpleRet)


### Fit GARCH(1,1) Model
library(rugarch)
# GARCH specification
garchSpec <- ugarchspec(variance.model=list(model="sGARCH",garchOrder=c(1,1)),
                        mean.model=list(armaOrder=c(0,0)), 
                        distribution.model="norm")
# Estimate coefficients
garchFit <- ugarchfit(spec=garchSpec, data=JPM$LogRet)
garchFit
## 
## *---------------------------------*
## *          GARCH Model Fit        *
## *---------------------------------*
## 
## Conditional Variance Dynamics    
## -----------------------------------
## GARCH Model  : sGARCH(1,1)
## Mean Model   : ARFIMA(0,0,0)
## Distribution : norm 
## 
## Optimal Parameters
## ------------------------------------
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.003194    0.001534   2.0819 0.037352
## omega   0.000066    0.000051   1.2876 0.197881
## alpha1  0.063833    0.033107   1.9281 0.053843
## beta1   0.873421    0.075063  11.6358 0.000000
## 
## Robust Standard Errors:
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.003194    0.001503  2.12504 0.033583
## omega   0.000066    0.000075  0.88779 0.374654
## alpha1  0.063833    0.050937  1.25317 0.210144
## beta1   0.873421    0.113811  7.67431 0.000000
## 
## LogLikelihood : 834.5133 
## 
## Information Criteria
## ------------------------------------
##                     
## Akaike       -3.9929
## Bayes        -3.9541
## Shibata      -3.9930
## Hannan-Quinn -3.9775
## 
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                     0.7315  0.3924
## Lag[2*(p+q)+(p+q)-1][2]    1.2946  0.4118
## Lag[4*(p+q)+(p+q)-1][5]    3.5249  0.3195
## d.o.f=0
## H0 : No serial correlation
## 
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                     0.2204  0.6387
## Lag[2*(p+q)+(p+q)-1][5]    1.3984  0.7649
## Lag[4*(p+q)+(p+q)-1][9]    2.3902  0.8537
## d.o.f=2
## 
## Weighted ARCH LM Tests
## ------------------------------------
##             Statistic Shape Scale P-Value
## ARCH Lag[3]   0.09997 0.500 2.000  0.7519
## ARCH Lag[5]   0.62060 1.440 1.667  0.8475
## ARCH Lag[7]   1.14187 2.315 1.543  0.8894
## 
## Nyblom stability test
## ------------------------------------
## Joint Statistic:  0.9341
## Individual Statistics:             
## mu     0.1905
## omega  0.1325
## alpha1 0.4934
## beta1  0.2145
## 
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic:          1.07 1.24 1.6
## Individual Statistic:     0.35 0.47 0.75
## 
## Sign Bias Test
## ------------------------------------
##                    t-value    prob sig
## Sign Bias           1.2238 0.22172    
## Negative Sign Bias  1.2018 0.23014    
## Positive Sign Bias  0.3963 0.69208    
## Joint Effect       10.7865 0.01294  **
## 
## 
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
##   group statistic p-value(g-1)
## 1    20     21.98       0.2852
## 2    30     36.88       0.1492
## 3    40     41.88       0.3468
## 4    50     48.66       0.4867
## 
## 
## Elapsed time : 0.06738901
# Interpret: The beta (GARCH) term is strongly significant and the alpha (ARCH) term is borderline (p ~ 0.05). The Ljung-Box tests indicate no remaining persistence in the residuals or squared residuals, and the LM tests suggest no remaining ARCH effects. 

2.4.16.2 GJR-GARCH Model

Let’s fit another popular GARCH variant: one with a threshold term that lets negative shocks move volatility differently from positive ones (i.e. GJR-GARCH). See http://www.scienpress.com/upload/jsem/vol%202_3_6.pdf for a nice general listing of GARCH variants.
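
In its standard form, the GJR-GARCH(1,1) conditional variance adds an indicator term that lets negative shocks raise volatility more than positive ones:

\[\sigma_t^2 = \omega + (\alpha_1 + \gamma_1 I_{t-1})\,\varepsilon_{t-1}^2 + \beta_1\sigma_{t-1}^2, \quad I_{t-1} = 1 \text{ if } \varepsilon_{t-1} < 0 \text{ and } 0 \text{ otherwise}\]

In rugarch's fGARCH parameterization, the asymmetry corresponds to the eta11 coefficient reported in the output below.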

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"2010-01-01" #We want from Jan2010 forward. 
endd<-"2018-01-01"
freq<-"weekly"
trans<-"rdiff" # calculate simple return
ticker <- "WIKI/JPM.11" # we load the JPM closed price data (the 11th column)

JPM <- Quandl(ticker,transform=trans,start_date=startd, end_date=endd, collapse=freq,type="xts")
colnames(JPM )[1]="SimpleRet" # Add column name 
# Careful here.  The `rdiff` provides the simple return.  We should convert to log returns since we are running a regression.   
JPM$LogRet<-log(1+JPM $SimpleRet)

### Fit GJR-GARCH(1,1) Model
library(rugarch)
# GJR-GARCH specification
garchSpecGJR <- ugarchspec(variance.model=list(model="fGARCH", garchOrder=c(1,1), submodel="GJRGARCH"),
                           mean.model=list(armaOrder=c(0,0)), 
                           distribution.model="norm")
# Estimate coefficients
gjrgarchFit <- ugarchfit(spec=garchSpecGJR, data=JPM$LogRet)
gjrgarchFit
## 
## *---------------------------------*
## *          GARCH Model Fit        *
## *---------------------------------*
## 
## Conditional Variance Dynamics    
## -----------------------------------
## GARCH Model  : fGARCH(1,1)
## fGARCH Sub-Model : GJRGARCH
## Mean Model   : ARFIMA(0,0,0)
## Distribution : norm 
## 
## Optimal Parameters
## ------------------------------------
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.002505    0.001494   1.6766 0.093611
## omega   0.000096    0.000069   1.3984 0.161986
## alpha1  0.042466    0.031490   1.3486 0.177481
## beta1   0.822648    0.101648   8.0931 0.000000
## eta11   0.999927    0.499401   2.0023 0.045258
## 
## Robust Standard Errors:
##         Estimate  Std. Error    t value Pr(>|t|)
## mu      0.002505    0.001563 1.6033e+00 0.108874
## omega   0.000096    0.000140 6.8349e-01 0.494299
## alpha1  0.042466    0.043766 9.7029e-01 0.331902
## beta1   0.822648    0.207667 3.9614e+00 0.000075
## eta11   0.999927    0.000041 2.4116e+04 0.000000
## 
## LogLikelihood : 841.6567 
## 
## Information Criteria
## ------------------------------------
##                     
## Akaike       -4.0224
## Bayes        -3.9739
## Shibata      -4.0227
## Hannan-Quinn -4.0032
## 
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                     0.4094  0.5223
## Lag[2*(p+q)+(p+q)-1][2]    1.0424  0.4847
## Lag[4*(p+q)+(p+q)-1][5]    3.5594  0.3143
## d.o.f=0
## H0 : No serial correlation
## 
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                    0.08876  0.7658
## Lag[2*(p+q)+(p+q)-1][5]   2.24601  0.5615
## Lag[4*(p+q)+(p+q)-1][9]   3.61423  0.6547
## d.o.f=2
## 
## Weighted ARCH LM Tests
## ------------------------------------
##             Statistic Shape Scale P-Value
## ARCH Lag[3]    0.3012 0.500 2.000  0.5831
## ARCH Lag[5]    0.8767 1.440 1.667  0.7699
## ARCH Lag[7]    1.6947 2.315 1.543  0.7816
## 
## Nyblom stability test
## ------------------------------------
## Joint Statistic:  0.9146
## Individual Statistics:              
## mu     0.09681
## omega  0.08449
## alpha1 0.27383
## beta1  0.16027
## eta11  0.27384
## 
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic:          1.28 1.47 1.88
## Individual Statistic:     0.35 0.47 0.75
## 
## Sign Bias Test
## ------------------------------------
##                    t-value    prob sig
## Sign Bias           1.7139 0.08731   *
## Negative Sign Bias  0.1440 0.88559    
## Positive Sign Bias  0.3059 0.75983    
## Joint Effect        7.3348 0.06196   *
## 
## 
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
##   group statistic p-value(g-1)
## 1    20     21.02       0.3357
## 2    30     31.12       0.3600
## 3    40     39.77       0.4357
## 4    50     55.15       0.2533
## 
## 
## Elapsed time : 0.2108381

2.4.17 VAR

The vector auto-regression (VAR) model extends the idea of uni-variate auto-regression to k time series, where lagged values of all k series appear as regressors in each equation. On the one hand, economic aggregates like GDP, investment, and consumer spending all depend upon interest rates. On the other hand, the level of interest rates is set, in part, by the prospects for economic growth and inflation. Hence, we model the series jointly with a VAR.
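
In matrix form, a \(k\)-variable VAR(\(p\)) is

\[y_t = c + A_1 y_{t-1} + A_2 y_{t-2} + \cdots + A_p y_{t-p} + u_t\]

where \(y_t\) is a \(k\times 1\) vector, each \(A_i\) is a \(k\times k\) coefficient matrix, and \(u_t\) is a vector of error terms. Below, \(y_t = (GDPGrowth_t,\ TSpread_t)'\) and \(p\) is chosen by information criteria.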

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd <-"1980-04-01"  
endd <-"2012-12-31"

TB3MS <- Quandl("FRED/TB3MS",start_date=startd, end_date=endd,type="ts")
TB10YS <- Quandl("FRED/GS10",start_date=startd, end_date=endd,type="ts")
GDP <- Quandl("FRED/GDPC96",start_date=startd, end_date=endd,type="ts", transform="rdiff")# note this is simple return, we need log return since we want to run regression

TSpread <- TB10YS - TB3MS 
TSpread <- aggregate(TSpread,nfrequency=4,FUN=mean)# aggregate monthly data to quarterly(averages)
GDPGrowth = 400*log(1+GDP)# annual log growth rate%

# Visual inspection
plot(cbind(GDPGrowth, TSpread),xlab = "Date", main='RGDP growth and Term spread')

library(vars)
### Step 1: Model selection (use information criteria to decide upon the number of lags to include)
VAR_Data<-na.omit(ts.union(GDPGrowth, TSpread)) #set up data for estimation
VARselect(VAR_Data, lag.max = 12, type = "const")$selection #obtain the best lag period
## AIC(n)  HQ(n)  SC(n) FPE(n) 
##      2      2      2      2
# Interpret: All the information criteria suggest using lags = 2 --> we need to set p=2 when estimating the model


### Step 2: Estimate VAR model
VAR_fit<-VAR(y = VAR_Data, p = 2)
summary(VAR_fit)
## 
## VAR Estimation Results:
## ========================= 
## Endogenous variables: GDPGrowth, TSpread 
## Deterministic variables: const 
## Sample size: 128 
## Log Likelihood: -381.122 
## Roots of the characteristic polynomial:
## 0.8208 0.8208 0.1958 0.1958
## Call:
## VAR(y = VAR_Data, p = 2)
## 
## 
## Estimation results for equation GDPGrowth: 
## ========================================== 
## GDPGrowth = GDPGrowth.l1 + TSpread.l1 + GDPGrowth.l2 + TSpread.l2 + const 
## 
##              Estimate Std. Error t value Pr(>|t|)    
## GDPGrowth.l1  0.29512    0.08356   3.532 0.000582 ***
## TSpread.l1   -0.87548    0.36949  -2.369 0.019373 *  
## GDPGrowth.l2  0.21663    0.08173   2.651 0.009089 ** 
## TSpread.l2    1.29865    0.37148   3.496 0.000658 ***
## const         0.48692    0.47198   1.032 0.304256    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 2.421 on 123 degrees of freedom
## Multiple R-Squared: 0.3111,  Adjusted R-squared: 0.2887 
## F-statistic: 13.88 on 4 and 123 DF,  p-value: 2.246e-09 
## 
## 
## Estimation results for equation TSpread: 
## ======================================== 
## TSpread = GDPGrowth.l1 + TSpread.l1 + GDPGrowth.l2 + TSpread.l2 + const 
## 
##               Estimate Std. Error t value Pr(>|t|)    
## GDPGrowth.l1  0.009302   0.017077   0.545 0.586944    
## TSpread.l1    1.057695   0.075515  14.006  < 2e-16 ***
## GDPGrowth.l2 -0.056374   0.016703  -3.375 0.000988 ***
## TSpread.l2   -0.218659   0.075920  -2.880 0.004691 ** 
## const         0.454647   0.096461   4.713 6.48e-06 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.4948 on 123 degrees of freedom
## Multiple R-Squared: 0.8304,  Adjusted R-squared: 0.8249 
## F-statistic: 150.6 on 4 and 123 DF,  p-value: < 2.2e-16 
## 
## 
## 
## Covariance matrix of residuals:
##           GDPGrowth TSpread
## GDPGrowth   5.86218 0.05959
## TSpread     0.05959 0.24486
## 
## Correlation matrix of residuals:
##           GDPGrowth TSpread
## GDPGrowth   1.00000 0.04974
## TSpread     0.04974 1.00000

Our estimate functions are:

\[\begin{align} GDPgrowth_t &= c_1 + \alpha_{11}GDPGrowth_{t-1} + \alpha_{12}TSpread_{t-1} + \alpha_{13}GDPGrowth_{t-2} + \alpha_{14}TSpread_{t-2} ~~~(1)\\ TSpread_t &= c_2 + \alpha_{21}GDPGrowth_{t-1} + \alpha_{22}TSpread_{t-1} + \alpha_{23}GDPGrowth_{t-2} + \alpha_{24}TSpread_{t-2} ~~~(2) \end{align}\]

In the first equation, the coefficient a11 = 0.295 implies that a one-unit increase in last quarter’s GDP growth rate raises this quarter’s GDP growth by 0.295 units, holding constant the dynamic effects of prior GDP growth and the Term Spread. Meanwhile, a12 = -0.875 implies that a one-unit increase in last quarter’s Term Spread lowers this quarter’s GDP growth by 0.875 units, all else equal.

Now let’s look at the second equation: a21 = 0.009 implies that a one-unit increase in last quarter’s GDP growth rate raises this quarter’s Term Spread by 0.009 units. Likewise, a22 = 1.058 implies that a one-unit increase in last quarter’s Term Spread raises this quarter’s Term Spread by 1.058 units.

However, since each coefficient in a VAR reflects only a partial dynamic relationship, we need additional tools, such as the Granger causality test and the impulse response function (IRF), to understand the full dynamics among the variables.

# Obtain the adj. R^2 from the output of 'VAR()'
summary(VAR_fit$varresult$GDPGrowth)$adj.r.squared
## [1] 0.2886718
summary(VAR_fit$varresult$TSpread)$adj.r.squared
## [1] 0.824881
# Multi-step forecast
predictions <- predict(VAR_fit,n.ahead = 15, ci = 0.95)
predictions
## $GDPGrowth
##           fcst     lower    upper       CI
##  [1,] 1.198711 -3.546741 5.944163 4.745452
##  [2,] 1.383492 -3.624828 6.391812 5.008320
##  [3,] 1.734391 -3.469314 6.938096 5.203705
##  [4,] 2.045169 -3.266459 7.356797 5.311628
##  [5,] 2.304776 -3.077621 7.687172 5.382396
##  [6,] 2.514830 -2.924988 7.954647 5.439817
##  [7,] 2.674189 -2.814666 8.163044 5.488855
##  [8,] 2.787256 -2.742916 8.317427 5.530171
##  [9,] 2.860470 -2.702736 8.423677 5.563207
## [10,] 2.901204 -2.686752 8.489160 5.587956
## [11,] 2.916929 -2.688342 8.522199 5.605271
## [12,] 2.914597 -2.701976 8.531170 5.616573
## [13,] 2.900280 -2.723177 8.523737 5.623457
## [14,] 2.878988 -2.748391 8.506367 5.627379
## [15,] 2.854633 -2.774856 8.484122 5.629489
## 
## $TSpread
##           fcst       lower    upper        CI
##  [1,] 1.805240  0.83538762 2.775093 0.9698527
##  [2,] 2.015903  0.60191951 3.429887 1.4139838
##  [3,] 2.137420  0.47220986 3.802629 1.6652097
##  [4,] 2.212731  0.37280391 4.052657 1.8399267
##  [5,] 2.248926  0.29235971 4.205491 1.9565658
##  [6,] 2.255636  0.22410116 4.287172 2.0315352
##  [7,] 2.242139  0.16481529 4.319463 2.0773237
##  [8,] 2.216036  0.11229197 4.319781 2.1037443
##  [9,] 2.183447  0.06528336 4.301611 2.1181637
## [10,] 2.148992  0.02326201 4.274722 2.1257301
## [11,] 2.115927 -0.01381595 4.245670 2.1297428
## [12,] 2.086338 -0.04576626 4.218441 2.1321039
## [13,] 2.061363 -0.07240850 4.195135 2.1337716
## [14,] 2.041416 -0.09372608 4.176558 2.1351420
## [15,] 2.026388 -0.10993889 4.162715 2.1363268
plot(predictions, names = "GDPGrowth")

plot(predictions, names = "TSpread")

2.4.18 Test the Direction of Causality

2.4.18.1 Test the Direction of Causality Using causality()

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd <-"1980-04-01"  
endd <-"2012-12-31"

TB3MS <- Quandl("FRED/TB3MS",start_date=startd, end_date=endd,type="ts")
TB10YS <- Quandl("FRED/GS10",start_date=startd, end_date=endd,type="ts")
GDP <- Quandl("FRED/GDPC96",start_date=startd, end_date=endd,type="ts", transform="rdiff")# note this is simple return, we need log return since we want to run regression

TSpread <- TB10YS - TB3MS 
TSpread <- aggregate(TSpread,nfrequency=4,FUN=mean)# aggregate monthly data to quarterly(averages)
GDPGrowth = 400*log(1+GDP)# annual log growth rate%

### VAR Model
library(vars)
VAR_Data<-na.omit(ts.union(GDPGrowth, TSpread)) #set up data for estimation
VARselect(VAR_Data, lag.max = 12, type = "const")$selection #obtain the best lag period
## AIC(n)  HQ(n)  SC(n) FPE(n) 
##      2      2      2      2
VAR_fit<-VAR(y = VAR_Data, p = 2)
summary(VAR_fit)
## 
## VAR Estimation Results:
## ========================= 
## Endogenous variables: GDPGrowth, TSpread 
## Deterministic variables: const 
## Sample size: 128 
## Log Likelihood: -381.122 
## Roots of the characteristic polynomial:
## 0.8208 0.8208 0.1958 0.1958
## Call:
## VAR(y = VAR_Data, p = 2)
## 
## 
## Estimation results for equation GDPGrowth: 
## ========================================== 
## GDPGrowth = GDPGrowth.l1 + TSpread.l1 + GDPGrowth.l2 + TSpread.l2 + const 
## 
##              Estimate Std. Error t value Pr(>|t|)    
## GDPGrowth.l1  0.29512    0.08356   3.532 0.000582 ***
## TSpread.l1   -0.87548    0.36949  -2.369 0.019373 *  
## GDPGrowth.l2  0.21663    0.08173   2.651 0.009089 ** 
## TSpread.l2    1.29865    0.37148   3.496 0.000658 ***
## const         0.48692    0.47198   1.032 0.304256    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 2.421 on 123 degrees of freedom
## Multiple R-Squared: 0.3111,  Adjusted R-squared: 0.2887 
## F-statistic: 13.88 on 4 and 123 DF,  p-value: 2.246e-09 
## 
## 
## Estimation results for equation TSpread: 
## ======================================== 
## TSpread = GDPGrowth.l1 + TSpread.l1 + GDPGrowth.l2 + TSpread.l2 + const 
## 
##               Estimate Std. Error t value Pr(>|t|)    
## GDPGrowth.l1  0.009302   0.017077   0.545 0.586944    
## TSpread.l1    1.057695   0.075515  14.006  < 2e-16 ***
## GDPGrowth.l2 -0.056374   0.016703  -3.375 0.000988 ***
## TSpread.l2   -0.218659   0.075920  -2.880 0.004691 ** 
## const         0.454647   0.096461   4.713 6.48e-06 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.4948 on 123 degrees of freedom
## Multiple R-Squared: 0.8304,  Adjusted R-squared: 0.8249 
## F-statistic: 150.6 on 4 and 123 DF,  p-value: < 2.2e-16 
## 
## 
## 
## Covariance matrix of residuals:
##           GDPGrowth TSpread
## GDPGrowth   5.86218 0.05959
## TSpread     0.05959 0.24486
## 
## Correlation matrix of residuals:
##           GDPGrowth TSpread
## GDPGrowth   1.00000 0.04974
## TSpread     0.04974 1.00000
### Granger Causality Tests
# H0: GDPGrowth does not Granger-cause TSpread (GDPGrowth is not the causal variable) <=> H0: a21 = a23 = 0
causality(VAR_fit, cause = "GDPGrowth")$Granger
## 
##  Granger causality H0: GDPGrowth do not Granger-cause TSpread
## 
## data:  VAR object VAR_fit
## F-Test = 6.1452, df1 = 2, df2 = 246, p-value = 0.002487
# H0: TSpread does not Granger-cause GDPGrowth (TSpread is not the causal variable) <=> H0: a12 = a14 = 0
causality(VAR_fit, cause = "TSpread")$Granger
## 
##  Granger causality H0: TSpread do not Granger-cause GDPGrowth
## 
## data:  VAR object VAR_fit
## F-Test = 7.1708, df1 = 2, df2 = 246, p-value = 0.00094

2.4.18.2 Test the Direction of Causality Using linearHypothesis()

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd <-"1980-04-01"  
endd <-"2012-12-31"

TB3MS <- Quandl("FRED/TB3MS",start_date=startd, end_date=endd,type="ts")
TB10YS <- Quandl("FRED/GS10",start_date=startd, end_date=endd,type="ts")
GDP <- Quandl("FRED/GDPC96",start_date=startd, end_date=endd,type="ts", transform="rdiff")# note this is simple return, we need log return since we want to run regression

TSpread <- TB10YS - TB3MS 
TSpread <- aggregate(TSpread,nfrequency=4,FUN=mean)# aggregate monthly data to quarterly(averages)
GDPGrowth = 400*log(1+GDP)# annual log growth rate%

### Model selection
VAR_Data<-na.omit(ts.union(GDPGrowth, TSpread)) #set up data for estimation
VARselect(VAR_Data, lag.max = 12, type = "const")$selection #obtain the best lag period
## AIC(n)  HQ(n)  SC(n) FPE(n) 
##      2      2      2      2
# Interpret: All the information criteria suggest using lags = 2 --> we need to set p=2 when estimating the model


### Estimate VAR equations separately by OLS
library(dynlm)
VAR_EQ1<-dynlm(GDPGrowth ~ L(GDPGrowth, 1:2) + L(TSpread, 1:2), start=c(1981,1), end=c(2012,4))
VAR_EQ2<-dynlm(TSpread ~ L(GDPGrowth, 1:2) + L(TSpread, 1:2), start=c(1981,1), end=c(2012,4))
# Rename regressors for better readability
names(VAR_EQ1$coefficients) <- c("Intercept","Growth_t-1", "Growth_t-2", "TSpread_t-1", "TSpread_t-2")
names(VAR_EQ2$coefficients) <- names(VAR_EQ1$coefficients)

# Obtain Robust Coefficient
library(lmtest)
library(sandwich)
coeftest(VAR_EQ1, vcov. = sandwich)
## 
## t test of coefficients:
## 
##              Estimate Std. Error t value Pr(>|t|)   
## Intercept    0.486924   0.523771  0.9297 0.354373   
## Growth_t-1   0.295117   0.109974  2.6835 0.008288 **
## Growth_t-2   0.216634   0.086355  2.5086 0.013421 * 
## TSpread_t-1 -0.875477   0.361088 -2.4246 0.016781 * 
## TSpread_t-2  1.298647   0.395225  3.2858 0.001325 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
coeftest(VAR_EQ2, vcov. = sandwich)
## 
## t test of coefficients:
## 
##               Estimate Std. Error t value  Pr(>|t|)    
## Intercept    0.4546467  0.1214656  3.7430 0.0002778 ***
## Growth_t-1   0.0093019  0.0218258  0.4262 0.6707140    
## Growth_t-2  -0.0563737  0.0266037 -2.1190 0.0360996 *  
## TSpread_t-1  1.0576951  0.0984832 10.7399 < 2.2e-16 ***
## TSpread_t-2 -0.2186588  0.1088367 -2.0091 0.0467207 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
### Granger causality tests
library(car)
# H0: X does not Granger Cause Y
linearHypothesis(VAR_EQ1, 
                 hypothesis.matrix = c("TSpread_t-1", "TSpread_t-2"),
                 vcov. = sandwich)
## Linear hypothesis test
## 
## Hypothesis:
## TSpread_t - 1 = 0
## TSpread_t - 2 = 0
## 
## Model 1: restricted model
## Model 2: GDPGrowth ~ L(GDPGrowth, 1:2) + L(TSpread, 1:2)
## 
## Note: Coefficient covariance matrix supplied.
## 
##   Res.Df Df      F   Pr(>F)   
## 1    125                      
## 2    123  2 5.5884 0.004753 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
linearHypothesis(VAR_EQ2, 
                 hypothesis.matrix = c("Growth_t-1", "Growth_t-2"),
                 vcov. = sandwich)
## Linear hypothesis test
## 
## Hypothesis:
## Growth_t - 1 = 0
## Growth_t - 2 = 0
## 
## Model 1: restricted model
## Model 2: TSpread ~ L(GDPGrowth, 1:2) + L(TSpread, 1:2)
## 
## Note: Coefficient covariance matrix supplied.
## 
##   Res.Df Df      F  Pr(>F)  
## 1    125                    
## 2    123  2 3.3739 0.03746 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Interpret: Both Granger causality tests reject at the 5% level; this is evidence in favor of the conjecture that the term spread has power in explaining GDP growth and vice versa

2.4.19 Impulse Response Function

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd <-"1980-04-01"  
endd <-"2012-12-31"

TB3MS <- Quandl("FRED/TB3MS",start_date=startd, end_date=endd,type="ts")
TB10YS <- Quandl("FRED/GS10",start_date=startd, end_date=endd,type="ts")
GDP <- Quandl("FRED/GDPC96",start_date=startd, end_date=endd,type="ts", transform="rdiff")# note this is simple return, we need log return since we want to run regression

TSpread <- TB10YS - TB3MS 
TSpread <- aggregate(TSpread,nfrequency=4,FUN=mean)# aggregate monthly data to quarterly(averages)
GDPGrowth = 400*log(1+GDP)# annual log growth rate%


### VAR Model
library(vars)
VAR_Data<-na.omit(ts.union(GDPGrowth, TSpread)) #set up data for estimation
VARselect(VAR_Data, lag.max = 12, type = "const")$selection #obtain the best lag period
## AIC(n)  HQ(n)  SC(n) FPE(n) 
##      2      2      2      2
VAR_fit<-VAR(y = VAR_Data, p = 2)
summary(VAR_fit)
## 
## VAR Estimation Results:
## ========================= 
## Endogenous variables: GDPGrowth, TSpread 
## Deterministic variables: const 
## Sample size: 128 
## Log Likelihood: -381.122 
## Roots of the characteristic polynomial:
## 0.8208 0.8208 0.1958 0.1958
## Call:
## VAR(y = VAR_Data, p = 2)
## 
## 
## Estimation results for equation GDPGrowth: 
## ========================================== 
## GDPGrowth = GDPGrowth.l1 + TSpread.l1 + GDPGrowth.l2 + TSpread.l2 + const 
## 
##              Estimate Std. Error t value Pr(>|t|)    
## GDPGrowth.l1  0.29512    0.08356   3.532 0.000582 ***
## TSpread.l1   -0.87548    0.36949  -2.369 0.019373 *  
## GDPGrowth.l2  0.21663    0.08173   2.651 0.009089 ** 
## TSpread.l2    1.29865    0.37148   3.496 0.000658 ***
## const         0.48692    0.47198   1.032 0.304256    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 2.421 on 123 degrees of freedom
## Multiple R-Squared: 0.3111,  Adjusted R-squared: 0.2887 
## F-statistic: 13.88 on 4 and 123 DF,  p-value: 2.246e-09 
## 
## 
## Estimation results for equation TSpread: 
## ======================================== 
## TSpread = GDPGrowth.l1 + TSpread.l1 + GDPGrowth.l2 + TSpread.l2 + const 
## 
##               Estimate Std. Error t value Pr(>|t|)    
## GDPGrowth.l1  0.009302   0.017077   0.545 0.586944    
## TSpread.l1    1.057695   0.075515  14.006  < 2e-16 ***
## GDPGrowth.l2 -0.056374   0.016703  -3.375 0.000988 ***
## TSpread.l2   -0.218659   0.075920  -2.880 0.004691 ** 
## const         0.454647   0.096461   4.713 6.48e-06 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.4948 on 123 degrees of freedom
## Multiple R-Squared: 0.8304,  Adjusted R-squared: 0.8249 
## F-statistic: 150.6 on 4 and 123 DF,  p-value: < 2.2e-16 
## 
## 
## 
## Covariance matrix of residuals:
##           GDPGrowth TSpread
## GDPGrowth   5.86218 0.05959
## TSpread     0.05959 0.24486
## 
## Correlation matrix of residuals:
##           GDPGrowth TSpread
## GDPGrowth   1.00000 0.04974
## TSpread     0.04974 1.00000
### IRF
# Consider the response of GDP growth to term spread shock
IRF_GDP<- irf(VAR_fit, impulse = "TSpread", response = "GDPGrowth", n.ahead = 20, boot = TRUE)
plot(IRF_GDP, ylab = "GDP growth", main = "Shock from Term spread")

#Interpret: The IRF estimates the effect of a one-unit shock to the error in TSpread on future values of GDPGrowth. For example, IRF_GDP[21]=-0.025 implies that a one-unit shock to the TSpread error lowers GDPGrowth 20 steps ahead by 0.025. Note that a positive shock to the term spread has an immediate negative impact on GDP growth, with growth falling to -0.432. By about the 5th quarter growth turns positive again, and by the 20th quarter the impact of the shock has largely dissipated.


# Consider the response of term spread to GDP growth shock
IRF_TSpread <- irf(VAR_fit, impulse="GDPGrowth", response="TSpread", n.ahead = 20, boot = TRUE)
plot(IRF_TSpread, ylab = "Term spread", main = "Shock from GDP growth")
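
A related summary is the forecast error variance decomposition (FEVD), which reports the share of each variable's forecast error variance attributable to each shock. Continuing with VAR_fit from above:

### FEVD
FEVD_fit <- fevd(VAR_fit, n.ahead = 20)
FEVD_fit$GDPGrowth[c(1,4,20),] # variance shares for GDPGrowth at horizons 1, 4, and 20
plot(FEVD_fit)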

2.4.20 Event Study

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
# prepare library
library(rvest) # crawl data from html
library(Quandl)
library(quantmod)
library(PerformanceAnalytics)
library(here)
library("readxl")
library(tidyverse)
library(data.table)
library(plyr)
library(ggplot2)
# fetch DOWJIA ticker list from wiki
url <- "https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average"
DOWJIA <- url %>%
  xml2::read_html() %>%
  html_nodes(xpath='//*[@id="constituents"]') %>%
  html_table()

DOWJIA <- DOWJIA[[1]]
DOW_Tickers <- DOWJIA$Symbol

2.4.20.1 Load Data

### load data (Dow 30 Constituents)
Tickers<-c("^GSPC",DOW_Tickers)
Rf_Tickers<-'DTB4WK'

startd = "2018-12-01"
endd = "2021-04-15"

# pull stock price from yahoo 
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
##  [1] "^GSPC" "MMM"   "AXP"   "AMGN"  "AAPL"  "BA"    "CAT"   "CVX"   "CSCO" 
## [10] "KO"    "DIS"   "DOW"   "GS"    "HD"    "HON"   "IBM"   "INTC"  "JNJ"  
## [19] "JPM"   "MCD"   "MRK"   "MSFT"  "NKE"   "PG"    "CRM"   "TRV"   "UNH"  
## [28] "VZ"    "V"     "WBA"   "WMT"
getSymbols(Rf_Tickers,src='FRED')
## [1] "DTB4WK"
### stock Return
tickers = gsub("[[:punct:]]", "", Tickers)
Return = do.call(merge,lapply(tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Return = Return[-1,]
names(Return) = tickers

### Rf
DTB4WK = na.omit(DTB4WK)
Rf = DTB4WK[paste(startd,"/",endd,sep="")] # annual rate
Rf = (Rf/100+1)^(1/252)-1 # convert the annual rate to a daily rate (252 business days)
names(Rf) = "Rf"

### merge data
Data = merge(Return,Rf)
Data = na.omit(Data)

### excess return
NumCol = ncol(Data)
StocksEXRet = Data[,1:(NumCol-1)] # all return columns, excluding Rf
for (i in 1:ncol(StocksEXRet)){
  StocksEXRet[,i] <- StocksEXRet[,i]-Data$Rf
}
Rf = Data$Rf

### log return
LogStocksEXRet = log(1+StocksEXRet)
LogRf = log(1+Rf)

# print data
head(cbind(LogStocksEXRet[,1:4],LogRf))
##                     GSPC           MMM          AXP          AMGN           Rf
## 2019-03-20 -0.0030434855 -0.0035455175 -0.017103285 -0.0020286947 9.450071e-05
## 2019-03-21  0.0106986043  0.0061735350  0.009338939  0.0039246006 9.643767e-05
## 2019-03-22 -0.0192543499 -0.0239965749 -0.021429039 -0.0275176874 9.566300e-05
## 2019-03-25 -0.0009343499 -0.0072054946 -0.003939690 -0.0006841096 9.488817e-05
## 2019-03-26  0.0070628327  0.0195445779  0.004115311  0.0088692926 9.488817e-05
## 2019-03-27 -0.0047500940 -0.0004808479 -0.004855072 -0.0105070326 9.450071e-05

2.4.20.2 Set Event Dates

# set parameters
event_date = as.Date("2020-01-06")
event_window = 10 
estimate_window = 250 
postevent_window = 30
T1 = event_date - event_window
T2 = event_date + event_window
T0 = T1 - estimate_window
T3 = T2 + postevent_window

### fit CAPM model
# estimation window data
Estimate_Data = LogStocksEXRet[paste(T0,"/",T1-1,sep="")]

# CAPM regression: regress each stock's excess return on the market (GSPC, column 1)
model<-lm(Estimate_Data[,2]~Estimate_Data[,1])
Coeff<- data.frame(model$coefficients)
Coeff = t(Coeff)

NumCols = ncol(Estimate_Data)
for (i in 3:NumCols){
  model<-lm(Estimate_Data[,i]~Estimate_Data[,1])
  coeff = data.frame(model$coefficients)
  coeff = t(coeff)
  Coeff = rbind(Coeff,coeff)
}

# save betas for all tickers
Tickers<-DOW_Tickers
Coeff<-data.frame(Coeff,Tickers)
head(Coeff)
##                       X.Intercept. Estimate_Data...1. Tickers
## model.coefficients   -1.824562e-03          1.1560216     MMM
## model.coefficients.1 -7.993878e-05          1.1140161     AXP
## model.coefficients.2  1.449461e-03          0.6143373    AMGN
## model.coefficients.3  1.142450e-03          1.4775051    AAPL
## model.coefficients.4 -1.518523e-03          0.8504409      BA
## model.coefficients.5 -5.245565e-04          1.2804621     CAT
### predict "normal" return
Test_data = LogStocksEXRet[paste(T1,"/",T3,sep="")]
Prediction = Test_data[,-1]
for(i in 1:ncol(Prediction)){
  Prediction[,i] = Coeff[i,1]+Coeff[i,2]*Test_data[,1]
}

### abnormal return
AR = Test_data[,-1]
for(i in 1:ncol(AR)){
  AR[,i] = Test_data[,i+1]-Prediction[,i]
}

### Cumulative AR
CAR = cumsum(AR)

2.4.20.3 Plot CAR

### convert to long table
CAR_df = data.frame(CAR)
CAR_df$Date = index(CAR)
CAR_df=melt(setDT(CAR_df), measure.vars=list(c(1:30)), 
     variable.name='Ticker', value.name=c("CAR"))[, 
      Ticker:= DOW_Tickers[Ticker]][order(Date)]

# average CAR
AvgCAR <- ddply(CAR_df, "Date", summarise, Ticker = "AVERAGE",AvgCAR=mean(CAR))

### plot

ggplot(CAR_df, aes(x=Date, y=CAR, group=Ticker, color=Ticker)) +
    geom_line(size=0.8,alpha=0.8)+
    geom_line(data = AvgCAR, aes(x=Date,y=AvgCAR),size = 1.2, color = "black")+
    geom_vline(aes(xintercept = event_date),linetype="dashed",color = "darkred",size=1.5)+
    geom_hline(aes(yintercept = 0),linetype="dashed",color = "darkred",size=1.1)+
    annotate(geom="text", x=event_date+10, y=0.4, label="Jan6th",fontface =2,
           size =8,alpha = 0.8,color="darkred")+
    scale_x_date(date_breaks = "5 days", date_labels = "%b %d")+
    ggtitle("Cumulative Abnormal Log Excess Return")+ylab("CAR")

d<-data.frame(CAR=as.numeric(t(CAR[T2])))
d$Ticker = DOW_Tickers

titletoplot = paste0("CAR ",event_window," days after Jan6th")
ggplot(data = d,aes(x = Ticker, y=CAR,fill=Ticker))+
  geom_bar(stat="identity",alpha=0.5)+
  theme(axis.text.x = element_text(angle = 45, hjust = 1))+
  labs(y="CAR",x='Ticker',title = titletoplot)

2.4.20.4 Summarize J Stats Across All Assets

### calculate J-stat
Avg_event_CAR = rowMeans(CAR[paste(T1,"/",T2,sep="")])
JCAR = tail(Avg_event_CAR,1)
event_AR = AR[paste(T1,"/",T2,sep="")]
Jsigma = sqrt(sum(sapply(event_AR,FUN=var))/30^2) # sd of the cross-sectional average CAR (30 stocks)

Jstat = JCAR/Jsigma
pvalues = pnorm(q=abs(Jstat),lower.tail = FALSE)*2

# print result
print(cbind(CAR = JCAR, Jstat,Pvalue = pvalues))
##               CAR     Jstat      Pvalue
## [1,] -0.004618422 -3.135429 0.001716032

2.5 Classification

2.5.1 Simple Logistic

Use a logistic to model recessions with 10yr-2yr Treasury Spread. Note: This is a simplified version of the typical models used.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(fredr)
library(xts)
fred_api_key<-read.csv("../data/fredkey.csv",stringsAsFactors=FALSE)
fredr_set_key(fred_api_key$Key)
TenTwo<-fredr(
  series_id = "T10Y2Y",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
Recession<-fredr(
  series_id = "USREC",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
MyData<-data.frame(TenTwo=TenTwo$value,Recession=Recession$value,row.names = Recession$date)
MyData<-as.xts(MyData)
MyData$TenTwoLag4 <- lag.xts(MyData$TenTwo,k=4)
MyData<-na.omit(MyData)
logit1<-glm(Recession~TenTwoLag4,data=MyData,family = 'binomial')
summary(logit1)
## 
## Call:
## glm(formula = Recession ~ TenTwoLag4, family = "binomial", data = MyData)
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -0.9476  -0.5342  -0.4136  -0.2796   2.6292  
## 
## Coefficients:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  -1.5604     0.1633  -9.557  < 2e-16 ***
## TenTwoLag4   -0.8068     0.1715  -4.704 2.55e-06 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 367.66  on 536  degrees of freedom
## Residual deviance: 342.96  on 535  degrees of freedom
## AIC: 346.96
## 
## Number of Fisher Scoring iterations: 5

2.5.2 Convert Log Odds to Prob

Reference: https://sebastiansauer.github.io/convert_logit2prob/

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(fredr)
fred_api_key<-read.csv("../data/fredkey.csv",stringsAsFactors=FALSE)
fredr_set_key(fred_api_key$Key)
TenTwo<-fredr(
  series_id = "T10Y2Y",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
Recession<-fredr(
  series_id = "USREC",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
MyData<-data.frame(TenTwo=TenTwo$value,Recession=Recession$value,row.names = Recession$date)

library(xts)
MyData<-as.xts(MyData)
MyData$TenTwoLag4 <- lag.xts(MyData$TenTwo,k=4)
MyData<-na.omit(MyData)
logit1<-glm(Recession~TenTwoLag4,data=MyData,family = 'binomial')

logit2prob <- function(logit){
  odds <- exp(logit)
  prob <- odds / (1 + odds) 
  return(prob)
}
logit2prob(coef(logit1))
## (Intercept)  TenTwoLag4 
##   0.1735834   0.3085671

Where the TenTwoLag4 entry of 0.31 is read as a 31% probability.
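
Applying logit2prob to a single coefficient is only a rough reading; for a predicted probability at a specific spread, convert the full linear predictor. Continuing with logit1 and logit2prob, and using a hypothetical spread of -0.5:

b<-coef(logit1)
logit2prob(b[1]+b[2]*(-0.5)) # P(recession | TenTwoLag4 = -0.5), hypothetical spread value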

2.5.3 Logit Marginal Effects

Use a logistic to model recessions with 10yr-2yr Treasury Spread. Note: This is a simplified version of the typical models used.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(fredr)
library(xts)
fred_api_key<-read.csv("../data/fredkey.csv",stringsAsFactors=FALSE)
fredr_set_key(fred_api_key$Key)
TenTwo<-fredr(
  series_id = "T10Y2Y",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
Recession<-fredr(
  series_id = "USREC",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
MyData<-data.frame(TenTwo=TenTwo$value,Recession=Recession$value,row.names = Recession$date)
MyData<-as.xts(MyData)
MyData$TenTwoLag4 <- lag.xts(MyData$TenTwo,k=4)
MyData<-na.omit(MyData)
logit1<-glm(Recession~TenTwoLag4,data=MyData,family = 'binomial')
newdata<-data.frame(TenTwoLag4=tail(TenTwo$value,n=1)) #last observed value of 10-2yr
PRED<-predict(logit1, newdata = newdata, type = 'response')
coeffs<-coef(logit1)
TenTwoMean=mean(TenTwo$value)
MgrlEffectTenTwo =(exp(-TenTwoMean)/(1+exp(-TenTwoMean))^2)*coeffs[2]
print(paste('For every 1 percentage point (i.e. 100bps) increase in the 10-2yr spread, the probability of a recession changes by ', MgrlEffectTenTwo))
## [1] "For every 1 percentage point (i.e. 100bps) increase in the 10-2yr spread, the probability of a recession changes by  -0.163825499802387"

2.5.4 Logit Confusion Matrix

Use a logistic regression to model recessions with the 10yr-2yr Treasury Spread

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(fredr)
fred_api_key<-read.csv("../data/fredkey.csv",stringsAsFactors=FALSE)
fredr_set_key(fred_api_key$Key)
TenTwo<-fredr(
  series_id = "T10Y2Y",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
Recession<-fredr(
  series_id = "USREC",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
MyData<-data.frame(TenTwo=TenTwo$value,Recession=Recession$value,row.names = Recession$date)

library(xts)
MyData<-as.xts(MyData)
MyData$TenTwoLag4 <- lag.xts(MyData$TenTwo,k=4)
MyData<-na.omit(MyData)
logit1<-glm(Recession~TenTwoLag4,data=MyData,family = 'binomial')

Pred<-predict(logit1,MyData,type="response")
cutoff = .3
Pred2<-ifelse(Pred>=cutoff,1,0)
library(caret)
Actual<-factor(MyData$Recession)
Predicted<-factor(Pred2)
C<-confusionMatrix(data=Predicted,reference=Actual)
C
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction   0   1
##          0 477  48
##          1   2  10
##                                           
##                Accuracy : 0.9069          
##                  95% CI : (0.8791, 0.9301)
##     No Information Rate : 0.892           
##     P-Value [Acc > NIR] : 0.148           
##                                           
##                   Kappa : 0.2582          
##                                           
##  Mcnemar's Test P-Value : 1.966e-10       
##                                           
##             Sensitivity : 0.9958          
##             Specificity : 0.1724          
##          Pos Pred Value : 0.9086          
##          Neg Pred Value : 0.8333          
##              Prevalence : 0.8920          
##          Detection Rate : 0.8883          
##    Detection Prevalence : 0.9777          
##       Balanced Accuracy : 0.5841          
##                                           
##        'Positive' Class : 0               
## 

Note: Accuracy = (477+10)/537 (i.e. the % of results that are correctly classified). Sensitivity = 477/(477+2) (i.e. the % of actual 0s correctly classified). Specificity = 10/(48+10) (i.e. the % of actual 1s correctly classified).
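
The 0.3 cutoff is arbitrary. One way to study the sensitivity/specificity trade-off across all cutoffs is an ROC curve; a sketch with the pROC package (assuming it is installed), continuing with MyData and Pred from above:

library(pROC)
ROC<-roc(response=as.numeric(MyData$Recession),predictor=as.numeric(Pred))
plot(ROC,print.auc=TRUE) # AUC summarizes classification skill across all cutoffs
coords(ROC,x="best") # cutoff maximizing sensitivity + specificity (Youden)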

2.5.5 Logit Prediction

Use a logistic to model recessions with 10yr-2yr Treasury Spread. Note: This is a simplified version of the typical models used.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(fredr)
library(xts)
fred_api_key<-read.csv("../data/fredkey.csv",stringsAsFactors=FALSE)
fredr_set_key(fred_api_key$Key)
TenTwo<-fredr(
  series_id = "T10Y2Y",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
Recession<-fredr(
  series_id = "USREC",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
MyData<-data.frame(TenTwo=TenTwo$value,Recession=Recession$value,row.names = Recession$date)
MyData<-as.xts(MyData)
MyData$TenTwoLag4 <- lag.xts(MyData$TenTwo,k=4)
MyData<-na.omit(MyData)
logit1<-glm(Recession~TenTwoLag4,data=MyData,family = 'binomial')
newdata<-data.frame(TenTwoLag4=tail(TenTwo$value,n=1)) #last observed value of 10-2yr
#newdata
PRED<-predict(logit1, newdata = newdata, type = 'response')
coeffs<-coef(logit1)
ByHand = 1/(1+exp(-(coeffs[1]+coeffs[2]*newdata)))
#ByHand
print(paste('Last observed 10-2yr spread = ', as.numeric(newdata[1])))
## [1] "Last observed 10-2yr spread =  0.78"
print(paste('Via predict ',PRED,' | By Hand ',ByHand))
## [1] "Via predict  0.100673319918696  | By Hand  0.100673319918696"

2.5.6 Yield Curve Recession

Use a logistic to model recessions with 10yr-2yr Treasury Spread. Note: This is a simplified version of the typical implementation. Recession in Next 3 mths = f(10-2yr Treasury Spread @ time t)

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(fredr)
library(xts)

fred_api_key<-read.csv("../data/fredkey.csv",stringsAsFactors=FALSE)
fredr_set_key(fred_api_key$Key)
TenTwo<-fredr(
  series_id = "T10Y2Y",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
Recession<-fredr(
  series_id = "USREC",
  frequency = "m", # monthly
  observation_start = as.Date("1977-01-01"),
  observation_end = as.Date("2022-01-01"),
  units = "lin"
)
MyData<-data.frame(TenTwo=TenTwo$value,Recession=Recession$value,row.names = Recession$date)
MyData<-as.xts(MyData)

USRECLEADS<-lag.xts(MyData$Recession,k=-3:-1)
USRECLEADS$RowSum<-rowSums(USRECLEADS)
USRECLEADS$RecNext3Mths<-ifelse(USRECLEADS$RowSum>=1,1,0)
MyData$RecNext3Mths<-USRECLEADS$RecNext3Mths
MyData<-na.omit(MyData)


logit1<-glm(RecNext3Mths~TenTwo,data=MyData,family = 'binomial')
summary(logit1)
## 
## Call:
## glm(formula = RecNext3Mths ~ TenTwo, family = "binomial", data = MyData)
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -0.8367  -0.5824  -0.4896  -0.3788   2.3567  
## 
## Coefficients:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  -1.5011     0.1603  -9.364  < 2e-16 ***
## TenTwo       -0.5134     0.1489  -3.448 0.000565 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 415.98  on 537  degrees of freedom
## Residual deviance: 403.50  on 536  degrees of freedom
## AIC: 407.5
## 
## Number of Fisher Scoring iterations: 5
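
To see the model's implied recession odds through time, plot the fitted probabilities. Continuing with logit1 and MyData:

MyData$PredProb<-predict(logit1,type='response') # fitted P(recession in next 3 months)
plot(MyData$PredProb,main="Fitted Probability of Recession in Next 3 Months")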

3 Forecast

3.1 Forecast via Density

If we know the distribution, then we can “predict” what might happen next.

3.1.1 Prob(Ret<.01)

What is the chance of getting a return smaller than .01? Let's calibrate to the mean and variance of Ford's daily returns.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(corrplot)
Tickers<-'F'
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
F$Returns = F$F.Adjusted/lag.xts(F$F.Adjusted,k=1)-1
mu = mean(na.omit(F$Returns))
sigma = sd(na.omit(F$Returns))
x=.01
ProbX = pnorm(x,mean = mu, sd = sigma)
print(ProbX)
## [1] 0.6819564
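
Normality is an assumption, so it is worth comparing against the empirical frequency in the sample. Continuing with F$Returns and x:

mean(na.omit(F$Returns)<x) # empirical fraction of days with a return below .01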

3.1.2 Prob(Ret>.01)

What is the chance of getting a return greater than .01? Let's calibrate to the mean and variance of Ford's daily returns.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(corrplot)
Tickers<-'F'
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
F$Returns = F$F.Adjusted/lag.xts(F$F.Adjusted,k=1)-1
mu = mean(na.omit(F$Returns))
sigma = sd(na.omit(F$Returns))
x=.01
ProbX = 1-pnorm(x,mean = mu, sd = sigma)
print(ProbX)
## [1] 0.3180436

3.1.3 Prob(.01<Ret<.05)

What is the chance of getting a return between .01 and .05? Let's calibrate to the mean and variance of Ford's daily returns.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(corrplot)
Tickers<-'F'
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
F$Returns = F$F.Adjusted/lag.xts(F$F.Adjusted,k=1)-1
mu = mean(na.omit(F$Returns))
sigma = sd(na.omit(F$Returns))
x=.01
y = .05
ProbX = pnorm(x,mean = mu, sd = sigma)
ProbY = pnorm(y,mean = mu, sd = sigma)
Prob = ProbY - ProbX
print(Prob)
## [1] 0.3100045

3.1.4 Prob(Ret<?)=70%

If there is a 70% chance of getting a return less than y, what is y? Let's calibrate to the mean and variance of Ford's daily returns.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(corrplot)
Tickers<-'F'
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
F$Returns = F$F.Adjusted/lag.xts(F$F.Adjusted,k=1)-1
mu = mean(na.omit(F$Returns))
sigma = sd(na.omit(F$Returns))
p = .7
X = qnorm(p,mean = mu, sd = sigma)
print(X)
## [1] 0.01105946

3.1.5 Prob(Ret>?)=70%

If there is a 70% chance of getting a return greater than y, what is y? Let's calibrate to the mean and variance of Ford's daily returns.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(corrplot)
Tickers<-'F'
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
F$Returns = F$F.Adjusted/lag.xts(F$F.Adjusted,k=1)-1
mu = mean(na.omit(F$Returns))
sigma = sd(na.omit(F$Returns))
p = .7
X = qnorm(1-p,mean = mu, sd = sigma)
print(X)
## [1] -0.01063285

3.1.6 Prob(-.01<Ret<Y)=25%

If we have a 25% probability of getting a return between -0.01 and y, what is y? Let's calibrate to the mean and variance of Ford's daily returns.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(corrplot)
Tickers<-'F'
startd = "2015-01-01"
endd = "2021-04-30"
TickersReturned<-getSymbols(Tickers,from=startd,to=endd,src='yahoo')
F$Returns = F$F.Adjusted/lag.xts(F$F.Adjusted,k=1)-1
mu = mean(na.omit(F$Returns))
sigma = sd(na.omit(F$Returns))
p = .25
x = -.01
ProbX = pnorm(x,mean = mu, sd = sigma)
y = qnorm(p+ProbX,mean = mu, sd = sigma)
print(y)
## [1] 0.003373701

3.2 Simulation

3.2.1 Simulate Returns

Simulate returns calibrated to MSFT daily returns. We assume a normal distribution.

knitr::opts_chunk$set(echo = TRUE)
rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return<-MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
MSFT = na.omit(MSFT)
MeanMSFT=mean(MSFT$Return)
SigmaMSFT=sd(MSFT$Return)
T = 1000
SimRet<-rnorm(T,mean=MeanMSFT,sd=SigmaMSFT)
plot(SimRet,type="l")

3.2.2 Simulate Returns with Seed

Simulate returns calibrated to MSFT daily returns using a specific seed. We assume a normal distribution.

knitr::opts_chunk$set(echo = TRUE)
rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return<-MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
MSFT = na.omit(MSFT)
MeanMSFT=mean(MSFT$Return)
SigmaMSFT=sd(MSFT$Return)
T = 1000
set.seed(10)
SimRet10<-rnorm(T,mean=MeanMSFT,sd=SigmaMSFT)
set.seed(11)
SimRet11<-rnorm(T,mean=MeanMSFT,sd=SigmaMSFT)
todisplay<-cbind(SimRet10,SimRet11)
head(todisplay)
##          SimRet10     SimRet11
## [1,]  0.001824892 -0.009217571
## [2,] -0.001851214  0.001967015
## [3,] -0.023348022 -0.025977858
## [4,] -0.009364917 -0.023190886
## [5,]  0.006819338  0.022826690
## [6,]  0.008544206 -0.015431139
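
Resetting the seed reproduces the draws exactly, which makes simulation results repeatable. Continuing with MeanMSFT, SigmaMSFT, and T:

set.seed(10)
identical(SimRet10,rnorm(T,mean=MeanMSFT,sd=SigmaMSFT)) # TRUE: same seed, same draws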

3.2.3 Simulate Multiple Return Paths

Simulate multiple return paths calibrated to MSFT daily returns. We assume a normal distribution for each path.

knitr::opts_chunk$set(echo = TRUE)
rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return<-MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
MSFT = na.omit(MSFT)
H = 2  # of paths to simulate
Means=rep(mean(MSFT$Return),H)
Sigmas=rep(sd(MSFT$Return),H)
Moments=cbind(Means,Sigmas)
T = 1000 # size of each path
func<-function(x) rnorm(T,mean=x[1],sd=x[2]) # draw one path using the mean and sd from a row of Moments
SimRet<-as.data.frame(apply(Moments,1,FUN=func))
par(mfrow=c(1,2))
plot(SimRet[,1],type="l")
plot(SimRet[,2],type="l")

3.2.4 Simulate Correlated Returns

Simulate normal returns calibrated to MSFT & GE daily returns.

knitr::opts_chunk$set(echo = TRUE)
rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
Tickers = c('MSFT','GE') # asset tickers
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(Tickers,from = start_date,to = end_date, src=data_src)
## [1] "MSFT" "GE"
library(PerformanceAnalytics)
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
Means<-as.numeric(lapply(Returns,FUN=mean))
Means
## [1]  0.0014854173 -0.0004071207
V<-cov(Returns)
V
##              MSFT           GE
## MSFT 0.0003279361 0.0001634275
## GE   0.0001634275 0.0007530286
library(mvtnorm)
T = 10000
SimReturns<-rmvnorm(T,Means,V)
cov(SimReturns)
##              [,1]         [,2]
## [1,] 0.0003277855 0.0001548615
## [2,] 0.0001548615 0.0007441123
apply(SimReturns,2,FUN=mean)
## [1]  0.0016385866 -0.0006632703
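
The same correlated draw can be built by hand from a Cholesky factor of the covariance matrix, which makes the mechanics behind rmvnorm explicit. A sketch continuing with Means, V, and T:

Z<-matrix(rnorm(T*2),nrow=T) # independent standard normals
SimByHand<-sweep(Z%*%chol(V),2,Means,"+") # chol(V) is upper-triangular R with t(R)%*%R=V
cov(SimByHand) # should be close to V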

3.2.5 Simulate Portfolio Correlated Returns

3.2.6 Sim. Port. Correlated Ret.

Simulate normal returns calibrated to MSFT & GE daily returns.

knitr::opts_chunk$set(echo = TRUE)
rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
Tickers = c('MSFT','GE') # asset tickers
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(Tickers,from = start_date,to = end_date, src=data_src)
## [1] "MSFT" "GE"
library(PerformanceAnalytics)
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
Means<-as.numeric(lapply(Returns,FUN=mean))
V<-cov(Returns)
C<-cor(Returns)
TargetMean=Means[1]+Means[2]
TargetMean
## [1] 0.001078296
TargetVariance=V[1,1]+V[2,2]+2*C[1,2]*sqrt(V[1,1])*sqrt(V[2,2])
sqrt(TargetVariance)
## [1] 0.03752092
T = 10000
Portfolio<-rnorm(T,mean=TargetMean,sd=sqrt(TargetVariance))
plot(Portfolio,type="l")

mean(Portfolio)
## [1] 0.001330043
sd(Portfolio)
## [1] 0.03716606

3.2.7 Geometric Random Walk

Let’s create a random walk calibrated to the mean and variance of MSFT historical returns.

knitr::opts_chunk$set(echo = TRUE)
rm(list=ls()) # clear workspace
cat("\014")  # clear console
library("tidyquant") 
ticker = 'MSFT' # asset ticker
start_date = '2017-01-01' # data start date
end_date = '2021-03-01' # data end date
data_src = 'yahoo' # data source
getSymbols(ticker,from = start_date,to = end_date, src=data_src)
## [1] "MSFT"
library(PerformanceAnalytics)
MSFT$Return<-MSFT$MSFT.Adjusted/lag.xts(MSFT$MSFT.Adjusted,k=1) - 1 # use lag.xts to obtain the lag for xts data
MSFT<-na.omit(MSFT)
MSFTMean=mean(MSFT$Return)
MSFTSd=sd(MSFT$Return)
T=dim(MSFT$Return)[1]
r<-as.data.frame(rnorm(T,mean=MSFTMean,sd=MSFTSd))
colnames(r)<-c('r')
StartPrice = 100
Price<-r
Price<-StartPrice*(exp(cumsum(Price)))
Price<-rbind(StartPrice,Price)
colnames(Price)<-c('Price')

plot(Price$Price,ylab = 'Geometric Random Walk')
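
Several paths can be overlaid to visualize the dispersion of outcomes. A sketch continuing with StartPrice, MSFTMean, MSFTSd, and T:

NPaths = 5
Paths<-sapply(1:NPaths,function(i) StartPrice*exp(cumsum(rnorm(T,mean=MSFTMean,sd=MSFTSd))))
matplot(Paths,type="l",lty=1,ylab="Price",main="Simulated Geometric Random Walks")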

3.3 Time Series Forecasting

3.3.1 ARIMA One-Step Ahead

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
startd<-"1990-01-01"
endd<-"2020-12-31"
Ticker = "GDPC1"
getSymbols(Ticker,from=startd,to=endd,src='FRED')
## [1] "GDPC1"
Growth<-diff(log(GDPC1))
Growth<-na.omit(Growth)
colnames(Growth)=c("Growth")
# Split Data
Train <-Growth["/2014"]
Test<-Growth["2015/"]
### Dynamic One-Step Ahead Forecast with ARIMA Model
library(forecast)
fit<-auto.arima(Train)

# Make 1-step ahead forecast
StepNum = 1 
ARIMAForecast <- forecast(fit, h = StepNum) # h is the forecast step number
ARIMAForecast  
##     Point Forecast        Lo 80      Hi 80       Lo 95      Hi 95
## 272    0.007302312 -0.004084941 0.01868956 -0.01011299 0.02471761
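
forecast objects plot directly with fan charts, and accuracy() compares forecasts against held-out data. Continuing with fit and Test:

plot(ARIMAForecast) # point forecast with 80%/95% intervals
accuracy(forecast(fit,h=length(Test)),as.numeric(Test)) # training vs. test set errors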

3.3.2 ARIMA Multi-Step Ahead

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
#library(Quandl)
#Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
#Quandl.api_key(Quandl_key)
#startd<-"1990-01-01"
#endd<-"2018-12-31"
#freq<-"quarterly"
#GDP <- Quandl("FRED/GDPC1", type="ts",start_date=startd, end_date=endd)
#GDPGrowth <- diff(log(GDP)) # calculate log growth rate
#GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs

library(quantmod)
startd<-"1990-01-01"
endd<-"2020-12-31"
Ticker = "GDPC1"
getSymbols(Ticker,from=startd,to=endd,src='FRED')
## [1] "GDPC1"
Growth<-diff(log(GDPC1))
Growth<-na.omit(Growth)
colnames(Growth)=c("Growth")
# Split Data
Train <-Growth["/2014"]
Test<-Growth["2015/"]
### Dynamic Multi-Step Ahead Forecast with ARIMA Model
library(forecast)
fit<-auto.arima(Train)

# Make 5-step ahead forecast
StepNum = 5
ARIMAForecast <- forecast(fit, h = StepNum) # h is the forecast step number
ARIMAForecast  
##     Point Forecast        Lo 80      Hi 80       Lo 95      Hi 95
## 272    0.007302312 -0.004084941 0.01868956 -0.01011299 0.02471761
## 273    0.006537313 -0.005430953 0.01850558 -0.01176657 0.02484120
## 274    0.007493945 -0.004860405 0.01984829 -0.01140040 0.02638829
## 275    0.007738037 -0.004641031 0.02011711 -0.01119411 0.02667019
## 276    0.007800320 -0.004580357 0.02018100 -0.01113429 0.02673493
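
Before trusting multi-step forecasts, check that the model residuals look like white noise. Continuing with fit:

checkresiduals(fit) # residual time plot, ACF, histogram, and a Ljung-Box test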

3.3.3 GARCH One-Step Ahead

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd <-"2012-01-01"  
endd <-"2015-12-31"
ticker <- "^GSPC"
Prices <- Ad(getSymbols(ticker, from = startd, to = endd, auto.assign = FALSE)) # get the adjusted close price
Returns <- diff(log(Prices)) # get daily log-return
Returns <- na.omit(Returns)
names(Returns)<-"Actual Return"

# Remove mean from data
# Note: means of daily stock returns are VERY small, this step does not matter much
Returns <- Returns - mean(Returns)

# Squared returns/volatility target
SqReturns <- Returns^2

# Split training and test data
Train <- Returns["/2015-11-29"] 
Test <- Returns["2015-11-30/"]
### Specify Model: GARCH(1,1)
library(rugarch)
spec <- ugarchspec(variance.model=list(model="sGARCH", garchOrder=c(1,1)),
                   mean.model=list(include.mean = F, armaOrder=c(0,0)), 
                   distribution.model="norm")

# Estimate coefficients
fit <- ugarchfit(spec = spec, data = Returns, out.sample = length(Test)) # note that out-of-sample data is excluded by indicating out.sample = length(Test)
coef(fit)
##        omega       alpha1        beta1 
## 7.453319e-06 1.503898e-01 7.315826e-01
### Make Static One-Step Ahead Forecast with GARCH Model
Forecast <- ugarchforecast(fitORspec = fit, n.ahead = 1, n.roll=length(Test)-1) 

# Forecast for volatility(sigma)
Forecast_Volatility <- data.frame(t(sigma(Forecast)))
names(Forecast_Volatility) <- rownames(sigma(Forecast))
Forecast_Volatility # Note that the date shown is when the forecast was made
##                    T+1
## 2015-11-27 0.006317410
## 2015-11-30 0.006372426
## 2015-12-01 0.007255539
## 2015-12-02 0.008122458
## 2015-12-03 0.009452887
## 2015-12-04 0.011490110
## 2015-12-07 0.010605657
## 2015-12-08 0.009853298
## 2015-12-09 0.009418555
## 2015-12-10 0.008533645
## 2015-12-11 0.011019910
## 2015-12-14 0.009951533
## 2015-12-15 0.009756998
## 2015-12-16 0.010309862
## 2015-12-17 0.011043903
## 2015-12-18 0.012157083
## 2015-12-21 0.011114263
## 2015-12-22 0.010401170
## 2015-12-23 0.010381049
## 2015-12-24 0.009324293
## 2015-12-28 0.008492470
## 2015-12-29 0.008691666
### Plot Forecasts
# Variance
PlotVariance = cbind(xts(Forecast_Volatility^2, order.by=index(Test)),
                       sigma(fit)^2, SqReturns)
PlotVariance = 252*PlotVariance
colnames(PlotVariance) <- c("Forecast", "ModelFitted", "Proxy(ret^2)")
# Plot
{plot(PlotVariance, col = c("red","blue","black"), lwd = 2, ylab = "Annual Variance",
     main = "Dynamic One-Step Ahead Forecast \n with GARCH(1,1) Model", legend.loc = "topleft")
addEventLines(xts("training", index(Test)[1]), srt=90, pos=2, lty = 2, lwd = 2, col = "blue") }

### MSE for GARCH 
MSE <- rbind("GARCH(1,1)" = c(mean((SqReturns - PlotVariance$ModelFitted)^2,na.rm = T),
                              mean((SqReturns - PlotVariance$Forecast)^2, na.rm = T)))
colnames(MSE) <- c("In-sample MSE", "Out-of-sample MSE")
print(MSE)
##            In-sample MSE Out-of-sample MSE
## GARCH(1,1)   0.000355409      0.0005995481
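
Two useful one-number diagnostics from rugarch are the persistence (alpha1 + beta1) and the implied long-run variance. Continuing with fit:

persistence(fit) # alpha1 + beta1; values near 1 imply slowly decaying volatility
sqrt(252*uncvariance(fit)) # implied long-run annualized volatility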

3.3.4 GARCH Multi-Step Ahead

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd <-"2012-01-01"  
endd <-"2015-12-31"
ticker <- "^GSPC"
Prices <- Ad(getSymbols(ticker, from = startd, to = endd, auto.assign = FALSE)) # get the adjusted close price
Returns <- diff(log(Prices)) # get daily log-return
Returns <- na.omit(Returns)
names(Returns)<-"Actual Return"

# Remove mean from data
# Note: means of daily stock returns are VERY small, this step does not matter much
Returns <- Returns - mean(Returns)

# Squared returns/volatility target
SqReturns <- Returns^2

# Split training and test data
Train <- Returns["/2015-11-29"] 
Test <- Returns["2015-11-30/"]
### Specify model: GARCH(1,1)
library(rugarch)
spec <- ugarchspec(variance.model=list(model="sGARCH", garchOrder=c(1,1)),
                   mean.model=list(include.mean = F, armaOrder=c(0,0)), 
                   distribution.model="norm")

# Estimate coefficients
fit <- ugarchfit(spec = spec, data = Returns, out.sample = length(Test)) # note that out-of-sample data is excluded by indicating out.sample = length(Test)
coef(fit)
##        omega       alpha1        beta1 
## 7.453319e-06 1.503898e-01 7.315826e-01
### Make Static 5-Step Ahead Forecast
StepNum = 5
StaticForecast <- ugarchforecast(fitORspec = fit, n.ahead = StepNum, n.roll=length(Test)-StepNum) # n.roll is the number of days you want to forecast


# Forecast for volatility(sigma)
StaticForecast_Volatility <- data.frame(t(sigma(StaticForecast)))
names(StaticForecast_Volatility) <- rownames(sigma(StaticForecast))
StaticForecast_Volatility # Note that the date shown is when the forecast was made, so 0.00632 in the first row is the one-step forecast made on 2015-11-27 (for 2015-11-30), and the T+5 entry in that row is the 5-step ahead forecast for 2015-12-04.
##                    T+1         T+2         T+3         T+4         T+5
## 2015-11-27 0.006317410 0.006530892 0.006713545 0.006870612 0.007006219
## 2015-11-30 0.006372426 0.006577864 0.006753870 0.006905380 0.007036301
## 2015-12-01 0.007255539 0.007340494 0.007414615 0.007479377 0.007536034
## 2015-12-02 0.008122458 0.008101904 0.008083733 0.008067672 0.008053481
## 2015-12-03 0.009452887 0.009287830 0.009139781 0.009007187 0.008888601
## 2015-12-04 0.011490110 0.011130751 0.010803892 0.010507176 0.010238346
## 2015-12-07 0.010605657 0.010327514 0.010075829 0.009848513 0.009643581
## 2015-12-08 0.009853298 0.009647891 0.009463028 0.009296934 0.009147941
## 2015-12-09 0.009418555 0.009257017 0.009112169 0.008982478 0.008866521
## 2015-12-10 0.008533645 0.008466480 0.008406797 0.008353804 0.008306785
## 2015-12-11 0.011019910 0.010703208 0.010415896 0.010155752 0.009920652
## 2015-12-14 0.009951533 0.009736411 0.009542656 0.009368444 0.009212060
## 2015-12-15 0.009756998 0.009561183 0.009385090 0.009226991 0.009085270
## 2015-12-16 0.010309862 0.010059872 0.009834116 0.009630614 0.009447494
## 2015-12-17 0.011043903 0.010724996 0.010435644 0.010173616 0.009936782
## 2015-12-18 0.012157083 0.011739001 0.011357498 0.011010058 0.010694262
## 2015-12-21 0.011114263 0.010788910 0.010493590 0.010226049 0.009984138
## 2015-12-22 0.010401170 0.010142432 0.009908626 0.009697739 0.009507861
#  5-step ahead volatility forecasts over the test set
FiveStep.Vol<-xts(StaticForecast_Volatility$`T+5`,order.by=
                    index(Test[StepNum:length(Test)]))
data.frame(FiveStep.Vol)
##            FiveStep.Vol
## 2015-12-04  0.007006219
## 2015-12-07  0.007036301
## 2015-12-08  0.007536034
## 2015-12-09  0.008053481
## 2015-12-10  0.008888601
## 2015-12-11  0.010238346
## 2015-12-14  0.009643581
## 2015-12-15  0.009147941
## 2015-12-16  0.008866521
## 2015-12-17  0.008306785
## 2015-12-18  0.009920652
## 2015-12-21  0.009212060
## 2015-12-22  0.009085270
## 2015-12-23  0.009447494
## 2015-12-24  0.009936782
## 2015-12-28  0.010694262
## 2015-12-29  0.009984138
## 2015-12-30  0.009507861
### Plot 5-Step Ahead Forecasts
# Variance
PlotVariance = cbind(FiveStep.Vol**2, sigma(fit)**2, SqReturns)
PlotVariance = 252*PlotVariance
colnames(PlotVariance) <- c("Forecast", "ModelFitted", "Proxy(ret^2)")
# Plot
{plot(PlotVariance, col = c("red","blue","black"), lwd = 2, ylab = "Annual Variance",
     main = "Static 5-Step Ahead Forecast \n with GARCH(1,1) Model", legend.loc = "topleft")
addEventLines(xts("training", index(Test)[1]), srt=90, pos=2, lty = 2, lwd = 2, col = "blue") }

### MSE for GARCH 
MSE <- rbind("GARCH(1,1)" = c(mean((SqReturns - PlotVariance$ModelFitted)**2,na.rm = T),
                              mean((SqReturns - PlotVariance$Forecast)**2, na.rm = T)))
colnames(MSE) <- c("In-sample MSE", "Out-of-sample MSE")
print(MSE)
##            In-sample MSE Out-of-sample MSE
## GARCH(1,1)   0.000355409      0.0004489248

3.3.5 ARMA-GARCH

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd <-"2012-01-01"  
endd <-"2015-12-31"
ticker <- "^GSPC"
Prices <- Ad(getSymbols(ticker, from = startd, to = endd, auto.assign = FALSE)) # get the adjusted close price
Returns <- diff(log(Prices)) # get daily log-return
Returns <- na.omit(Returns)
names(Returns)<-"Actual Return"

# Split data
Train <- Returns["/2014"] 
Test <- Returns["2015/"]
### Specify Model: ARMA(1,1)-GARCH(1,1)
library(rugarch)
spec <- ugarchspec(variance.model=list(model="sGARCH", garchOrder=c(1,1)),
                   mean.model=list(armaOrder=c(1,1)), 
                   distribution.model="norm")

# Estimate coefficients
fit <- ugarchfit(spec = spec, data = Returns, out.sample = length(Test)) # note that out-of-sample data is excluded by indicating out.sample = length(Test)
coef(fit)
##            mu           ar1           ma1         omega        alpha1 
##  8.338081e-04 -9.280023e-01  9.149296e-01  7.539274e-06  1.539060e-01 
##         beta1 
##  7.153080e-01
### Make Static Forecast with ARMA(1,1)-GARCH(1,1) Model
StaticForecast <- ugarchforecast(fitORspec = fit, n.ahead = 1, n.roll=length(Test)-1) 

# Forecast for returns
StaticForecast_Returns <- data.frame(t(fitted(StaticForecast)))
names(StaticForecast_Returns) <- rownames(fitted(StaticForecast))
StaticForecast_Returns # Note that the date shown is when the forecast was made, so 0.00112 is the one-step forecast made on 2014-12-31, which is the forecast for 2015-01-02 Returns
##                     T+1
## 2014-12-31 0.0011174572
## 2015-01-02 0.0005896342
## 2015-01-05 0.0013092652
## 2015-01-06 0.0005264803
## 2015-01-07 0.0009747353
## 2015-01-08 0.0004839885
## 2015-01-09 0.0012750934
## 2015-01-12 0.0005472002
## 2015-01-13 0.0011406866
## 2015-01-14 0.0006401503
## 2015-01-15 0.0011433488
## 2015-01-16 0.0003871771
## 2015-01-20 0.0012330977
## 2015-01-21 0.0004176771
## 2015-01-22 0.0010273309
## 2015-01-23 0.0007396354
## 2015-01-26 0.0008973360
## 2015-01-27 0.0009627824
## 2015-01-28 0.0009043313
## 2015-01-29 0.0006561308
## 2015-01-30 0.0011782236
## 2015-02-02 0.0003612267
## 2015-02-03 0.0010896735
## 2015-02-04 0.0006650534
## 2015-02-05 0.0008652579
## 2015-02-06 0.0008606952
## 2015-02-09 0.0008757490
## 2015-02-10 0.0006675164
## 2015-02-11 0.0009972330
## 2015-02-12 0.0005697103
## 2015-02-13 0.0010331795
## 2015-02-17 0.0006414294
## 2015-02-18 0.0010248307
## 2015-02-19 0.0006838274
## 2015-02-20 0.0009020840
## 2015-02-23 0.0007862066
## 2015-02-24 0.0008522453
## 2015-02-25 0.0008378534
## 2015-02-26 0.0008603170
## 2015-02-27 0.0008591586
## 2015-03-02 0.0007416892
## 2015-03-03 0.0009884566
## 2015-03-04 0.0007607116
## 2015-03-05 0.0008959597
## 2015-03-06 0.0009744614
## 2015-03-09 0.0006645575
## 2015-03-10 0.0012231931
## 2015-03-11 0.0005135417
## 2015-03-12 0.0009740238
## 2015-03-13 0.0007960758
## 2015-03-16 0.0007034954
## 2015-03-17 0.0010074111
## 2015-03-18 0.0005278887
## 2015-03-19 0.0011884564
## 2015-03-20 0.0004029366
## 2015-03-23 0.0012617667
## 2015-03-24 0.0005336625
## 2015-03-25 0.0013110437
## 2015-03-26 0.0004391886
## 2015-03-27 0.0011748304
## 2015-03-30 0.0003737016
## 2015-03-31 0.0013811666
## 2015-04-01 0.0003958549
## 2015-04-02 0.0011993435
## 2015-04-06 0.0004241582
## 2015-04-07 0.0012464916
## 2015-04-08 0.0004321109
## 2015-04-09 0.0011540911
## 2015-04-10 0.0004838327
## 2015-04-13 0.0012249385
## 2015-04-14 0.0004655634
## 2015-04-15 0.0011144980
## 2015-04-16 0.0005980771
## 2015-04-17 0.0012090968
## 2015-04-20 0.0003811714
## 2015-04-21 0.0012782088
## 2015-04-22 0.0003717743
## 2015-04-23 0.0012366512
## 2015-04-24 0.0004467181
## 2015-04-27 0.0012531190
## 2015-04-28 0.0004249170
## 2015-04-29 0.0012678028
## 2015-04-30 0.0005807210
## 2015-05-01 0.0009342461
## 2015-05-04 0.0007144274
## 2015-05-05 0.0011096031
## 2015-05-06 0.0006507538
## 2015-05-07 0.0009629489
## 2015-05-08 0.0005517956
## 2015-05-11 0.0011694341
## 2015-05-12 0.0005762508
## 2015-05-13 0.0010843422
## 2015-05-14 0.0004753267
## 2015-05-15 0.0011626521
## 2015-05-18 0.0005040548
## 2015-05-19 0.0011548227
## 2015-05-20 0.0005631725
## 2015-05-21 0.0010617941
## 2015-05-22 0.0006653527
## 2015-05-26 0.0011339422
## 2015-05-27 0.0004508716
## 2015-05-28 0.0012116387
## 2015-05-29 0.0005818814
## 2015-06-01 0.0010483084
## 2015-06-02 0.0006616473
## 2015-06-03 0.0009745532
## 2015-06-04 0.0008291534
## 2015-06-05 0.0008677553
## 2015-06-08 0.0008985635
## 2015-06-09 0.0007799937
## 2015-06-10 0.0007374579
## 2015-06-11 0.0009101531
## 2015-06-12 0.0008666136
## 2015-06-15 0.0008752631
## 2015-06-16 0.0007326088
## 2015-06-17 0.0009114448
## 2015-06-18 0.0006448577
## 2015-06-19 0.0010871002
## 2015-06-22 0.0005335292
## 2015-06-23 0.0011111324
## 2015-06-24 0.0006874587
## 2015-06-25 0.0010175383
## 2015-06-26 0.0006817085
## 2015-06-29 0.0012595325
## 2015-06-30 0.0004204928
## 2015-07-01 0.0011325028
## 2015-07-02 0.0005754509
## 2015-07-06 0.0011316682
## 2015-07-07 0.0004929331
## 2015-07-08 0.0013761144
## 2015-07-09 0.0003189964
## 2015-07-10 0.0011554143
## 2015-07-13 0.0004065926
## 2015-07-14 0.0011774948
## 2015-07-15 0.0005398709
## 2015-07-16 0.0010092841
## 2015-07-17 0.0006697072
## 2015-07-20 0.0009847708
## 2015-07-21 0.0007624189
## 2015-07-22 0.0009412755
## 2015-07-23 0.0008207957
## 2015-07-24 0.0009972893
## 2015-07-27 0.0007708483
## 2015-07-28 0.0007413860
## 2015-07-29 0.0008339399
## 2015-07-30 0.0008442168
## 2015-07-31 0.0008649137
## 2015-08-03 0.0008523386
## 2015-08-04 0.0008571970
## 2015-08-05 0.0007826550
## 2015-08-06 0.0009932572
## 2015-08-07 0.0007364602
## 2015-08-10 0.0007674005
## 2015-08-11 0.0010310046
## 2015-08-12 0.0006518735
## 2015-08-13 0.0010278467
## 2015-08-14 0.0006161363
## 2015-08-17 0.0009759121
## 2015-08-18 0.0007490610
## 2015-08-19 0.0010306073
## 2015-08-20 0.0009434389
## 2015-08-21 0.0011675575
## 2015-08-24 0.0010650235
## 2015-08-25 0.0008111375
## 2015-08-26 0.0003648793
## 2015-08-27 0.0009599053
## 2015-08-28 0.0007213828
## 2015-08-31 0.0010577342
## 2015-09-01 0.0010323089
## 2015-09-02 0.0004261162
## 2015-09-03 0.0012024995
## 2015-09-04 0.0007093328
## 2015-09-08 0.0006347358
## 2015-09-09 0.0012097984
## 2015-09-10 0.0004318879
## 2015-09-11 0.0011539103
## 2015-09-14 0.0006054097
## 2015-09-15 0.0008870037
## 2015-09-16 0.0006827271
## 2015-09-17 0.0010164596
## 2015-09-18 0.0008906308
## 2015-09-21 0.0007331681
## 2015-09-22 0.0010988220
## 2015-09-23 0.0006290479
## 2015-09-24 0.0010760868
## 2015-09-25 0.0006291309
## 2015-09-28 0.0013718797
## 2015-09-29 0.0003363038
## 2015-09-30 0.0010528691
## 2015-10-01 0.0006185042
## 2015-10-02 0.0008558834
## 2015-10-05 0.0005875734
## 2015-10-06 0.0011169880
## 2015-10-07 0.0004809909
## 2015-10-08 0.0010527357
## 2015-10-09 0.0006349292
## 2015-10-12 0.0010100051
## 2015-10-13 0.0007730330
## 2015-10-14 0.0009621135
## 2015-10-15 0.0005345797
## 2015-10-16 0.0010588688
## 2015-10-19 0.0006352573
## 2015-10-20 0.0010449590
## 2015-10-21 0.0007278967
## 2015-10-22 0.0007260297
## 2015-10-23 0.0007999109
## 2015-10-26 0.0009007552
## 2015-10-27 0.0008168883
## 2015-10-28 0.0007063165
## 2015-10-29 0.0009672352
## 2015-10-30 0.0007856617
## 2015-11-02 0.0007344502
## 2015-11-03 0.0008999990
## 2015-11-04 0.0008305781
## 2015-11-05 0.0008624721
## 2015-11-06 0.0008230278
## 2015-11-09 0.0009836159
## 2015-11-10 0.0006879120
## 2015-11-11 0.0010204610
## 2015-11-12 0.0008581175
## 2015-11-13 0.0009698047
## 2015-11-16 0.0005268924
## 2015-11-17 0.0011430355
## 2015-11-18 0.0003521891
## 2015-11-19 0.0013000460
## 2015-11-20 0.0003684184
## 2015-11-23 0.0012866600
## 2015-11-24 0.0004144156
## 2015-11-25 0.0012301111
## 2015-11-27 0.0004743610
## 2015-11-30 0.0012343887
## 2015-12-01 0.0003393216
## 2015-12-02 0.0014416682
## 2015-12-03 0.0004778234
## 2015-12-04 0.0009047998
## 2015-12-07 0.0008714484
## 2015-12-08 0.0008953871
## 2015-12-09 0.0008899308
## 2015-12-10 0.0007639613
## 2015-12-11 0.0011650195
## 2015-12-14 0.0004796524
## 2015-12-15 0.0010306542
## 2015-12-16 0.0004762218
## 2015-12-17 0.0013699885
## 2015-12-18 0.0005888940
## 2015-12-21 0.0009674960
## 2015-12-22 0.0006076397
## 2015-12-23 0.0008902983
## 2015-12-24 0.0008139389
## 2015-12-28 0.0008913979
## 2015-12-29 0.0006537912
# Forecast for volatility(sigma)
StaticForecast_Volatility <- data.frame(t(sigma(StaticForecast)))
names(StaticForecast_Volatility) <- rownames(sigma(StaticForecast))
StaticForecast_Volatility # Note that the date shown is when the forecast was made
##                    T+1
## 2014-12-31 0.007773264
## 2015-01-02 0.007147567
## 2015-01-05 0.009992923
## 2015-01-06 0.009752694
## 2015-01-07 0.009711921
## 2015-01-08 0.010872737
## 2015-01-09 0.010215442
## 2015-01-12 0.009787212
## 2015-01-13 0.008807111
## 2015-01-14 0.008396470
## 2015-01-15 0.008552667
## 2015-01-16 0.009096061
## 2015-01-20 0.008181096
## 2015-01-21 0.007568807
## 2015-01-22 0.009052101
## 2015-01-23 0.008527761
## 2015-01-26 0.007750568
## 2015-01-27 0.009072748
## 2015-01-28 0.009950038
## 2015-01-29 0.009471051
## 2015-01-30 0.010036429
## 2015-02-02 0.010033134
## 2015-02-03 0.010469139
## 2015-02-04 0.009496747
## 2015-02-05 0.009282142
## 2015-02-06 0.008485308
## 2015-02-09 0.007941757
## 2015-02-10 0.008201537
## 2015-02-11 0.007465205
## 2015-02-12 0.007667371
## 2015-02-13 0.007174473
## 2015-02-17 0.006663871
## 2015-02-18 0.006280500
## 2015-02-19 0.006035313
## 2015-02-20 0.006174324
## 2015-02-23 0.005918792
## 2015-02-24 0.005761472
## 2015-02-25 0.005629090
## 2015-02-26 0.005570439
## 2015-02-27 0.005655278
## 2015-03-02 0.005886738
## 2015-03-03 0.006052700
## 2015-03-04 0.006181455
## 2015-03-05 0.005907669
## 2015-03-06 0.008241862
## 2015-03-09 0.007581516
## 2015-03-10 0.009862132
## 2015-03-11 0.008867434
## 2015-03-12 0.009272607
## 2015-03-13 0.008759530
## 2015-03-16 0.009329550
## 2015-03-17 0.008502859
## 2015-03-18 0.008839777
## 2015-03-19 0.008242762
## 2015-03-20 0.008091021
## 2015-03-23 0.007421469
## 2015-03-24 0.007443851
## 2015-03-25 0.009095696
## 2015-03-26 0.008295485
## 2015-03-27 0.007571954
## 2015-03-30 0.008193388
## 2015-03-31 0.008283080
## 2015-04-01 0.007812080
## 2015-04-02 0.007259408
## 2015-04-06 0.007050023
## 2015-04-07 0.006636637
## 2015-04-08 0.006273817
## 2015-04-09 0.006178667
## 2015-04-10 0.006111707
## 2015-04-13 0.006182482
## 2015-04-14 0.005908097
## 2015-04-15 0.005988592
## 2015-04-16 0.005808975
## 2015-04-17 0.007330927
## 2015-04-20 0.007469369
## 2015-04-21 0.006926865
## 2015-04-22 0.006639199
## 2015-04-23 0.006298779
## 2015-04-24 0.006006411
## 2015-04-27 0.006049570
## 2015-04-28 0.005836917
## 2015-04-29 0.005881216
## 2015-04-30 0.007242411
## 2015-05-01 0.007831551
## 2015-05-04 0.007213069
## 2015-05-05 0.008323267
## 2015-05-06 0.007866225
## 2015-05-07 0.007300345
## 2015-05-08 0.008327430
## 2015-05-11 0.007878053
## 2015-05-12 0.007385849
## 2015-05-13 0.006832232
## 2015-05-14 0.007431267
## 2015-05-15 0.006859624
## 2015-05-18 0.006460808
## 2015-05-19 0.006131916
## 2015-05-20 0.005924922
## 2015-05-21 0.005756144
## 2015-05-22 0.005737064
## 2015-05-26 0.007050345
## 2015-05-27 0.007274153
## 2015-05-28 0.006770751
## 2015-05-29 0.007007465
## 2015-06-01 0.006557379
## 2015-06-02 0.006240868
## 2015-06-03 0.005977058
## 2015-06-04 0.006883440
## 2015-06-05 0.006497872
## 2015-06-08 0.006788652
## 2015-06-09 0.006367128
## 2015-06-10 0.007470697
## 2015-06-11 0.006900388
## 2015-06-12 0.007160650
## 2015-06-15 0.006990860
## 2015-06-16 0.006785398
## 2015-06-17 0.006380580
## 2015-06-18 0.006997750
## 2015-06-19 0.006930972
## 2015-06-22 0.006762592
## 2015-06-23 0.006344589
## 2015-06-24 0.006887009
## 2015-06-25 0.006598090
## 2015-06-26 0.006243800
## 2015-06-29 0.010409508
## 2015-06-30 0.009238409
## 2015-07-01 0.008664599
## 2015-07-02 0.007846057
## 2015-07-06 0.007390164
## 2015-07-07 0.007095597
## 2015-07-08 0.009462596
## 2015-07-09 0.008468094
## 2015-07-10 0.008988275
## 2015-07-13 0.008958809
## 2015-07-14 0.008213279
## 2015-07-15 0.007507034
## 2015-07-16 0.007508440
## 2015-07-17 0.006918626
## 2015-07-20 0.006463805
## 2015-07-21 0.006455729
## 2015-07-22 0.006235446
## 2015-07-23 0.006490251
## 2015-07-24 0.007636437
## 2015-07-27 0.007506416
## 2015-07-28 0.008266643
## 2015-07-29 0.007938894
## 2015-07-30 0.007261004
## 2015-07-31 0.006837283
## 2015-08-03 0.006557584
## 2015-08-04 0.006307320
## 2015-08-05 0.006064392
## 2015-08-06 0.006718535
## 2015-08-07 0.006491156
## 2015-08-10 0.007733420
## 2015-08-11 0.008177463
## 2015-08-12 0.007441343
## 2015-08-13 0.006908001
## 2015-08-14 0.006553439
## 2015-08-17 0.006441341
## 2015-08-18 0.006262434
## 2015-08-19 0.006940080
## 2015-08-20 0.010904889
## 2015-08-21 0.016229483
## 2015-08-24 0.021435249
## 2015-08-25 0.019218887
## 2015-08-26 0.022089610
## 2015-08-27 0.021038110
## 2015-08-28 0.018004307
## 2015-08-31 0.015883672
## 2015-09-01 0.018348755
## 2015-09-02 0.017127351
## 2015-09-03 0.014746387
## 2015-09-04 0.014344253
## 2015-09-08 0.015615534
## 2015-09-09 0.014659605
## 2015-09-10 0.012798110
## 2015-09-11 0.011279144
## 2015-09-14 0.010138308
## 2015-09-15 0.010186315
## 2015-09-16 0.009543470
## 2015-09-17 0.008620364
## 2015-09-18 0.010335590
## 2015-09-21 0.009274625
## 2015-09-22 0.009777237
## 2015-09-23 0.008800307
## 2015-09-24 0.008086795
## 2015-09-25 0.007394842
## 2015-09-28 0.012482038
## 2015-09-29 0.010908170
## 2015-09-30 0.012069247
## 2015-10-01 0.010576665
## 2015-10-02 0.010770541
## 2015-10-05 0.011679654
## 2015-10-06 0.010383138
## 2015-10-07 0.009589327
## 2015-10-08 0.009160516
## 2015-10-09 0.008220762
## 2015-10-12 0.007479535
## 2015-10-13 0.007553905
## 2015-10-14 0.007280952
## 2015-10-15 0.008642363
## 2015-10-16 0.007966173
## 2015-10-19 0.007282054
## 2015-10-20 0.006791339
## 2015-10-21 0.006916044
## 2015-10-22 0.008943980
## 2015-10-23 0.008995041
## 2015-10-26 0.008157802
## 2015-10-27 0.007548732
## 2015-10-28 0.008171033
## 2015-10-29 0.007450041
## 2015-10-30 0.007238672
## 2015-11-02 0.007981516
## 2015-11-03 0.007329196
## 2015-11-04 0.007000968
## 2015-11-05 0.006572088
## 2015-11-06 0.006217756
## 2015-11-09 0.007266046
## 2015-11-10 0.006734005
## 2015-11-11 0.006507125
## 2015-11-12 0.008541908
## 2015-11-13 0.009075889
## 2015-11-16 0.009791334
## 2015-11-17 0.008755146
## 2015-11-18 0.009823095
## 2015-11-19 0.008769087
## 2015-11-20 0.007969215
## 2015-11-23 0.007305021
## 2015-11-24 0.006761002
## 2015-11-25 0.006346834
## 2015-11-27 0.006034560
## 2015-11-30 0.006134504
## 2015-12-01 0.006930134
## 2015-12-02 0.007866416
## 2015-12-03 0.009529289
## 2015-12-04 0.011535859
## 2015-12-07 0.010600981
## 2015-12-08 0.009813982
## 2015-12-09 0.009380182
## 2015-12-10 0.008412008
## 2015-12-11 0.011048358
## 2015-12-14 0.009840016
## 2015-12-15 0.009614913
## 2015-12-16 0.010060810
## 2015-12-17 0.010841874
## 2015-12-18 0.012211179
## 2015-12-21 0.011049421
## 2015-12-22 0.010210785
## 2015-12-23 0.010164057
## 2015-12-24 0.009076935
## 2015-12-28 0.008237382
## 2015-12-29 0.008396680
### Plot Forecasts
# Returns
PlotReturns = cbind(xts(StaticForecast_Returns, order.by = index(Test)), Returns)
colnames(PlotReturns) <- c("Forecast", "Actual")

{plot(PlotReturns, col = c("red","black"), lwd = 2, ylab = "Returns",
     main = "Static One-Step Ahead Forecast with ARMA(1,1)-GARCH(1,1) Model", legend.loc = "topleft")
addEventLines(xts("training", index(Test)[1]), srt=90, pos=2, lwd = 2, col = "blue") }

# Volatility
PlotVolatility = cbind(xts(StaticForecast_Volatility, order.by = index(Test)), sigma(fit))
PlotVolatility = sqrt(252)*PlotVolatility
colnames(PlotVolatility) <- c("Forecast", "ModelFitted")
{plot(PlotVolatility*100, col = c("red","black"), lwd = 2, ylab = "Annual Volatility(%)",
     main = "Static One-Step Ahead Forecast with ARMA(1,1)-GARCH(1,1) Model", legend.loc = "topleft")
addEventLines(xts("training", index(Test)[1]), srt=90, pos=2, lwd = 2, col = "blue") }

### MSE
MSE <- rbind("ARMA(1,1)-GARCH(1,1)" = c(mean((Returns - fitted(fit))**2,na.rm = T),
                              mean((Returns - PlotReturns$Forecast)**2, na.rm = T)))
colnames(MSE) <- c("In-sample MSE", "Out-of-sample MSE")
print(MSE)
##                      In-sample MSE Out-of-sample MSE
## ARMA(1,1)-GARCH(1,1)  5.445976e-05      9.623136e-05
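
As context for the out-of-sample MSE, compare against naive return forecasts over the same test window. Continuing with Train and Test:

mean((as.numeric(Test)-0)^2) # benchmark: forecast every day's return as zero
mean((as.numeric(Test)-mean(Train))^2) # benchmark: forecast the training-sample mean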

3.3.6 Rolling ARMA-GARCH

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)

startd <-"2012-01-01"  
endd <-"2015-12-31"
ticker <- "^GSPC"
Prices <- Ad(getSymbols(ticker, from = startd, to = endd, auto.assign = FALSE)) # get the adjusted close price
Returns <- diff(log(Prices)) # get daily log-return
Returns <- na.omit(Returns)
names(Returns)<-"Actual Return"

# Split data
Train <- Returns["/2014"] 
Test <- Returns["2015/"]
### Specify Model: ARMA(1,1)-GARCH(1,1)
library(rugarch)
spec <- ugarchspec(variance.model=list(model="sGARCH", garchOrder=c(1,1)),
                   mean.model=list(armaOrder=c(1,1)), 
                   distribution.model="norm")

# Estimate coefficients
fit <- ugarchfit(spec = spec, data = Returns, out.sample = length(Test)) # note that out-of-sample data is excluded by indicating out.sample = length(Test)
coef(fit)
##            mu           ar1           ma1         omega        alpha1 
##  8.338081e-04 -9.280023e-01  9.149296e-01  7.539274e-06  1.539060e-01 
##         beta1 
##  7.153080e-01
### Make Rolling 1-Day Ahead Forecasts with ARMA(1,1)-GARCH(1,1), Refitting the Model Every 22 Steps (continue for length(Test)=251 periods)
RollForecast <- ugarchroll(spec, data = Returns, n.ahead = 1, forecast.length = length(Test), refit.every = 22, refit.window = "moving") 

# Forecast for returns
RollForecast_Returns <- xts(RollForecast@forecast$density$Mu, index(Test))
data.frame(RollForecast_Returns)
##            RollForecast_Returns
## 2015-01-02         1.117457e-03
## 2015-01-05         5.896342e-04
## 2015-01-06         1.309265e-03
## 2015-01-07         5.264803e-04
## 2015-01-08         9.747353e-04
## 2015-01-09         4.839885e-04
## 2015-01-12         1.275093e-03
## 2015-01-13         5.472002e-04
## 2015-01-14         1.140687e-03
## 2015-01-15         6.401503e-04
## ...
## 2015-12-29         8.813657e-04
## 2015-12-30         2.801701e-04
## (251 daily forecasts in total; intermediate rows omitted)
# Forecast for volatility(sigma)
RollForecast_Volatility <- xts(RollForecast@forecast$density$Sigma, index(Test))
data.frame(RollForecast_Volatility)
##            RollForecast_Volatility
## 2015-01-02             0.007773264
## 2015-01-05             0.007147567
## 2015-01-06             0.009992923
## 2015-01-07             0.009752694
## 2015-01-08             0.009711921
## 2015-01-09             0.010872737
## 2015-01-12             0.010215442
## 2015-01-13             0.009787212
## 2015-01-14             0.008807111
## 2015-01-15             0.008396470
## ...
## 2015-12-29             0.008060442
## 2015-12-30             0.008415867
## (251 daily forecasts in total; intermediate rows omitted)
### Plot Forecasts
# Returns
PlotReturns = cbind(RollForecast_Returns, Returns)
colnames(PlotReturns) <- c("Forecast", "Actual")

{plot(PlotReturns, col = c("red","black"), lwd = 2, ylab = "Returns",
     main = "Rolling One-Step Ahead Forecast with ARMA(1,1)-GARCH(1,1) Model", legend.loc = "topleft")
addEventLines(xts("training", index(Test)[1]), srt=90, pos=2, lwd = 2, col = "blue") }

# Volatility
PlotVolatility = cbind(RollForecast_Volatility, sigma(fit))
PlotVolatility = sqrt(252)*PlotVolatility
colnames(PlotVolatility) <- c("Forecast", "ModelFitted")

{plot(PlotVolatility*100, col = c("red","black"), lwd = 2, ylab = "Annual Volatility(%)",
     main = "Rolling One-Step Ahead Forecast with ARMA(1,1)-GARCH(1,1) Model", legend.loc = "topleft")
addEventLines(xts("training", index(Test)[1]), srt=90, pos=2, lwd = 2, col = "blue") }

### MSE
MSE <- rbind("ARMA(1,1)-GARCH(1,1) Rolling" = c(mean((Returns - fitted(fit))**2,na.rm = T),
                              mean((Returns - RollForecast_Returns)**2, na.rm = T)))
colnames(MSE) <- c("In-sample MSE", "Out-of-sample MSE")
print(MSE)
##                              In-sample MSE Out-of-sample MSE
## ARMA(1,1)-GARCH(1,1) Rolling  5.445976e-05        9.7482e-05
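
As an alternative to the manual MSE above, rugarch can summarize rolling forecast performance directly: report() with type = "fpm" prints forecast performance measures (MSE, MAE, and directional accuracy) for the ugarchroll object.

report(RollForecast, type = "fpm")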

3.3.7 OOS Forecast Accuracy

### Housekeeping 
rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
startd<-"1990-01-01"
endd<-"2020-12-31"
Ticker = "GDPC1"
getSymbols(Ticker,from=startd,to=endd,src='FRED') # note: src='FRED' ignores the from/to arguments and returns the full available history, which is why the forecasts below extend past endd
## [1] "GDPC1"
Growth<-diff(log(GDPC1))
Growth<-na.omit(Growth)
colnames(Growth)=c("Growth")
# Split Data
Train <-Growth["/2014"]
Test<-Growth["2015/"]
### Build Models
library(forecast)
fit <- auto.arima(Train)  


### Make One-step Ahead Forecast by Feeding Test Data to Model
# Note that we included training data because the model needs historical data (Train) to make the first few predictions
fit.out <- Arima(Growth, model = fit) 
# Extract one-step-ahead forecasts over the test set
forecast.out <- subset(fitted(fit.out), start=length(Train)+1) 
data.frame(OneStepForecast=forecast.out, row.names=as.yearqtr(index(Test)))
##         OneStepForecast
## 2015 Q1    0.0073023116
## 2015 Q2    0.0067954395
## 2015 Q3    0.0073812380
## 2015 Q4    0.0061770504
## 2016 Q1    0.0051017306
## 2016 Q2    0.0064978340
## 2016 Q3    0.0065027623
## 2016 Q4    0.0066713043
## 2017 Q1    0.0068783544
## 2017 Q2    0.0065590235
## 2017 Q3    0.0067785598
## 2017 Q4    0.0074984886
## 2018 Q1    0.0084136453
## 2018 Q2    0.0080566674
## 2018 Q3    0.0078099286
## 2018 Q4    0.0068937777
## 2019 Q1    0.0055157751
## 2019 Q2    0.0065067308
## 2019 Q3    0.0080190998
## 2019 Q4    0.0077474413
## 2020 Q1    0.0065888700
## 2020 Q2    0.0005585852
## 2020 Q3   -0.0281756907
## 2020 Q4    0.0137190738
## 2021 Q1    0.0273079139
## 2021 Q2    0.0083938289
## 2021 Q3    0.0082622226
## 2021 Q4    0.0085732439
## 2022 Q1    0.0101527974
### Calculate Forecast Accuracy
# Train Sample
accuracy(fit)
##                        ME        RMSE         MAE       MPE     MAPE      MASE
## Training set 2.383421e-05 0.008819701 0.006511951 -215.3185 331.5274 0.7920155
##                    ACF1
## Training set 0.00248218
# Test Sample
accuracy(forecast.out, Test) # the first input is the fitted/forecast value
##                    ME       RMSE        MAE       MPE     MAPE
## Test set -0.001846066 0.02628291 0.01031618 -8.837743 74.21872
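
As a sanity check, the test-set RMSE reported by accuracy() can be reproduced from its definition:

# RMSE over the test set from first principles; should match the RMSE column above
sqrt(mean((as.numeric(Test) - as.numeric(forecast.out))^2))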

3.3.8 Diebold-Mariano Test

The Diebold-Mariano test compares the forecast accuracy of two competing forecast methods; the null hypothesis is that the two methods have equal predictive accuracy.

3.3.8.1 In-Sample One-Step Forecast

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP <- Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
GDPGrowth <- diff(log(GDP)) # calculate log growth rate
GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs

### Make Forecasts
# Build models
Model1<-arima(GDPGrowth, order = c(1, 0, 0)) # fit an AR(1) model 
Model2<-arima(GDPGrowth, order = c(0, 0, 3)) # fit an MA(3) model 


### Apply DM Test to Model Residuals
# Extract residuals
M1Residuals <- Model1$residuals
M2Residuals <- Model2$residuals
# DM test on residuals
library(forecast)
dm.test(M1Residuals, M2Residuals, h=1) #h is the forecast horizon used in calculating residuals
## 
##  Diebold-Mariano Test
## 
## data:  M1ResidualsM2Residuals
## DM = 0.80227, Forecast horizon = 1, Loss function power = 2, p-value =
## 0.4241
## alternative hypothesis: two.sided
# Interpret: High p-value -->  we cannot reject the null hypothesis that Model1 and Model2 have the same levels of accuracy over the training set
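
As a cross-check, the h = 1 DM statistic under squared-error loss can be computed from its definition. This is a minimal sketch; dm.test() additionally applies the Harvey-Leybourne-Newbold small-sample correction and uses a t reference distribution, so the numbers will differ slightly.

d <- as.numeric(M1Residuals)^2 - as.numeric(M2Residuals)^2 # loss differential
DM <- mean(d) / sqrt(var(d) / length(d)) # approximately N(0,1) under the null
2 * pnorm(-abs(DM)) # two-sided p-value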

3.3.8.2 Out-of-Sample One-Step Forecast

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-01-01"
freq<-"quarterly"
GDP <- Quandl("FRED/GDPC1", type="xts",start_date=startd, end_date=endd)
GDPGrowth <- diff(log(GDP)) # calculate log growth rate
GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs

# Split Data
Train <- GDPGrowth["/2014"] 
Test <- GDPGrowth["2015/"]


### Build Models
Model1<-arima(Train, order = c(1, 0, 0)) # fit an AR(1) model 
Model2<-arima(Train, order = c(0, 0, 3)) # fit an MA(3) model 


### Make One-step Ahead Forecast by Feeding Test Data to Model
# Note that we included training data because the model needs historical data (Train) to make the first few predictions
library(forecast)
Model1.out <- Arima(GDPGrowth, model = Model1) 
Model2.out <- Arima(GDPGrowth, model = Model2)


### Apply DM test
# Extract residuals over the test set
M1Residuals.out <- subset(Model1.out$residuals, start=length(Train)+1) 
M2Residuals.out <- subset(Model2.out$residuals, start=length(Train)+1)
# DM test on residuals
dm.test(M1Residuals.out,M2Residuals.out, h = 1) 
## 
##  Diebold-Mariano Test
## 
## data:  M1Residuals.outM2Residuals.out
## DM = 1.0293, Forecast horizon = 1, Loss function power = 2, p-value =
## 0.3236
## alternative hypothesis: two.sided
# Interpret: High p-value -->  we cannot reject the null hypothesis that Model1 and Model2 have the same levels of forecast accuracy over the test set

3.3.9 NNETAR One-Step Ahead

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load data
library(Quandl)
Quandl_key = read.csv("quandlkey.csv",stringsAsFactors=FALSE)$Key
Quandl.api_key(Quandl_key)
startd<-"1990-01-01"
endd<-"2018-12-31"
freq<-"quarterly"
GDP <- Quandl("FRED/GDPC1", type="ts",start_date=startd, end_date=endd)
GDPGrowth <- diff(log(GDP)) # calculate log growth rate
GDPGrowth <- na.omit(GDPGrowth) # get rid of the NAs

# Split data
Train <- window(GDPGrowth,  end = 2014.9999)
Test <- window(GDPGrowth,  start = 2015)
### One-Step Ahead Forecasts with NNETAR model
library(forecast)
set.seed(123)
fit <- nnetar(Train) 
fit
## Series: Train 
## Model:  NNAR(14,1,8)[4] 
## Call:   nnetar(y = Train)
## 
## Average of 20 networks, each of which is
## a 14-8-1 network with 129 weights
## options were - linear output units 
## 
## sigma^2 estimated as 2.091e-09
NNETARForecast <- forecast(fit, h = 1, PI=FALSE) # h is the number of periods to forecast; set PI = TRUE to compute prediction intervals (this takes some time)
data.frame(PointForecast=NNETARForecast$mean, row.names=as.yearqtr(index(Test)[1]))
##         PointForecast
## 2015 Q1   0.004302383

The above command only gives the first one-step-ahead forecast. To forecast over the whole out-of-sample period, refit the model to the test data:

### Make Forecasts Over The Whole Test Set 
fit.out <- nnetar(Test, model = fit) # Note that the length of the test data must exceed the larger of the data frequency (4) and the maximum lag order of the fitted NNETAR model (14)
NNETARForecast.out <- fitted(fit.out)
data.frame(OneStepForecast=NNETARForecast.out, row.names=as.yearqtr(index(Test))) 
##         OneStepForecast
## 2015 Q1              NA
## 2015 Q2              NA
## 2015 Q3              NA
## 2015 Q4              NA
## 2016 Q1              NA
## 2016 Q2              NA
## 2016 Q3              NA
## 2016 Q4              NA
## 2017 Q1              NA
## 2017 Q2              NA
## 2017 Q3              NA
## 2017 Q4              NA
## 2018 Q1              NA
## 2018 Q2              NA
## 2018 Q3     0.008899046
## 2018 Q4     0.004010705
# Note that the first few (14) fitted values are NA because the second call to nnetar knows nothing about the earlier data. This is solved by refitting the model on the training and test data together:
fit.out2 <- nnetar(GDPGrowth, model = fit)
NNETARForecast.out2 <- subset(fitted(fit.out2), start=length(Train)+1)  
data.frame(OneStepForecast=NNETARForecast.out2, row.names=as.yearqtr(index(Test))) 
##         OneStepForecast
## 2015 Q1    0.0043023825
## 2015 Q2    0.0112161243
## 2015 Q3    0.0027718595
## 2015 Q4    0.0044836787
## 2016 Q1    0.0126382375
## 2016 Q2   -0.0001183569
## 2016 Q3   -0.0035165914
## 2016 Q4    0.0132423986
## 2017 Q1    0.0030158720
## 2017 Q2   -0.0023111000
## 2017 Q3    0.0002952316
## 2017 Q4    0.0074882225
## 2018 Q1    0.0064936085
## 2018 Q2    0.0083218970
## 2018 Q3    0.0088990458
## 2018 Q4    0.0040107055
### Plot Forecasts
library(ggplot2)
CutDate <- tsp(Test)[1] #test sample start time
autoplot(GDPGrowth, series = "GDP Growth Rate", lwd = 1) + 
  autolayer(NNETARForecast.out2, series="NNETAR",lwd = 1) +
  geom_vline(xintercept = CutDate, lwd = 1,color = "blue") +
  annotate("text", x = CutDate - 1/2, y = 0.017,srt=90, label = "training") +
  scale_color_manual(values = c("green","black")) +
  ylab("") + xlab("") + ggtitle("One-Step Ahead Forecast with NNETAR Model")

### MSE 
MSE <- rbind("NNETAR" = c(mean((GDPGrowth - fit$fitted)**2,na.rm = T),
                          mean((GDPGrowth - NNETARForecast.out2)**2, na.rm = T)))
colnames(MSE) <- c("In-sample MSE", "Out-of-sample MSE")
print(MSE)
##        In-sample MSE Out-of-sample MSE
## NNETAR  2.090753e-09      2.531095e-05
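
To judge whether the network adds value, it helps to benchmark the out-of-sample MSE against a naive forecast. A minimal sketch using the unconditional training-sample mean:

# Naive benchmark: forecast every test quarter with the training-sample mean
mean((as.numeric(Test) - mean(Train))^2)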

4 Protect

4.1 Constructing Portfolios

Note: Using Return.portfolio() does NOT include transaction costs, nor does it produce an optimal portfolio.

4.1.1 Buy&Hold Equal Weights

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
port1<-Return.portfolio(R=Returns, verbose = TRUE) # with weights omitted, the portfolio starts at equal weights and is held thereafter (weights drift with returns)

4.1.2 Buy&Hold NonEqual Weights

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
wF = .05
wGE = .25
wCAT = .70
startingweights<-c(wF,wGE,wCAT)
port1<-Return.portfolio(R=Returns, weights = startingweights, verbose = TRUE)

4.1.3 Buy&Hold With Shorts

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
library(PerformanceAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
wF = -.05
wGE = .25
wCAT = .8
startingweights<-c(wF,wGE,wCAT)
port1<-Return.portfolio(R=Returns, weights = startingweights, verbose = TRUE)

4.1.4 Buy&Hold Fully Invested

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
wF = .05
wGE = .25
wCAT = 1-wF-wGE
startingweights<-c(wF,wGE,wCAT)
port1<-Return.portfolio(R=Returns, weights = startingweights, verbose = TRUE)

4.1.5 Buy&Hold Fully Invested v2

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
weightmatrix<-Returns #stealing the data type and dates
T = dim(weightmatrix)[1] #Number of days
wF = .05
wGE = .25
wCAT = .70
weightmatrix$F<-rep(wF,T)
weightmatrix$GE<-rep(wGE,T)
weightmatrix$CAT<-rep(wCAT,T)
weightmatrix$GE<-rep(.05,dim(weightmatrix$GE)[1]) # change a weight arbitrarily for this example
weightmatrix$CAT<-1-rowSums(weightmatrix[,-3]) # "-3" excludes the CAT column, so CAT absorbs the remainder and the portfolio is always fully invested
head(weightmatrix)
##               F   GE CAT
## 2015-01-05 0.05 0.05 0.9
## 2015-01-06 0.05 0.05 0.9
## 2015-01-07 0.05 0.05 0.9
## 2015-01-08 0.05 0.05 0.9
## 2015-01-09 0.05 0.05 0.9
## 2015-01-12 0.05 0.05 0.9
port1<-Return.portfolio(R=Returns, weights = weightmatrix, verbose = TRUE)

4.1.6 Compute Portfolio Value

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
port1<-Return.portfolio(R=Returns, verbose = TRUE)
StartingEquity = 1 # change to whatever value you like
port1$GrossReturns<-port1$returns+1
port1$GrossReturns[1,1]=StartingEquity # seed the first observation so cumprod() below starts the portfolio index at StartingEquity
colnames(port1$GrossReturns)<- "GrossReturns"
port1$PortfolioIndex=cumprod(port1$GrossReturns)
colnames(port1$PortfolioIndex)<- "PortfolioIndex"
tail(port1$PortfolioIndex)
##            PortfolioIndex
## 2021-04-22       1.632678
## 2021-04-23       1.653915
## 2021-04-26       1.657846
## 2021-04-27       1.662295
## 2021-04-28       1.664663
## 2021-04-29       1.606665
chart.TimeSeries(port1$PortfolioIndex)

4.1.7 Compute Portfolio Value v2

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
port1<-Return.portfolio(R=Returns, verbose = TRUE)
chart.CumReturns(port1$returns)

4.1.8 Portfolio Drawdown

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
port1<-Return.portfolio(R=Returns, verbose = TRUE)
chart.Drawdown(port1$returns)
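
chart.Drawdown() computes the drawdown series internally; the same quantity can be reproduced from its definition as wealth relative to its running maximum. A minimal sketch:

# Drawdown from first principles: cumulative wealth vs. its running maximum
Wealth <- cumprod(1 + as.numeric(port1$returns))
Drawdown <- Wealth / cummax(Wealth) - 1
head(Drawdown)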

4.1.9 Portfolio Value/Return Drawdown

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
port1<-Return.portfolio(R=Returns, verbose = TRUE)
charts.PerformanceSummary(port1$returns)

4.1.10 Chart Evolution of Weights

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
port1<-Return.portfolio(R=Returns, verbose = TRUE)
chart.StackedBar(port1$BOP.Weight["/2015-02-28",])

4.1.11 Pie Chart of Weights

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
port1<-Return.portfolio(R=Returns, verbose = TRUE)
LastWeight = data.matrix(tail(port1$EOP.Weight,n=1))
pie(LastWeight,Tickers)

4.1.12 Chart Asset Contributions

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers
port1<-Return.portfolio(R=Returns, verbose = TRUE)
chart.TimeSeries(port1$contribution,legend.loc = 'bottomright')

Contributions<-cumsum(port1$contribution)
chart.TimeSeries(Contributions,legend.loc = 'topleft')

4.1.13 Regular Rebalancing

Note: Using Return.portfolio() does NOT include transaction costs, nor does it produce an optimal portfolio. Set rebalance_on to any of days, weeks, months, quarters, or years.

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
library(PerformanceAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
wF = .05
wGE = .25
wCAT = .70
startingweights<-c(wF,wGE,wCAT)
port1<-Return.portfolio(R=Returns, weights = startingweights, rebalance_on = "days",   verbose = TRUE)
head(port1$BOP.Weight)
##            daily.returns daily.returns.1 daily.returns.2
## 2015-01-05          0.05            0.25             0.7
## 2015-01-06          0.05            0.25             0.7
## 2015-01-07          0.05            0.25             0.7
## 2015-01-08          0.05            0.25             0.7
## 2015-01-09          0.05            0.25             0.7
## 2015-01-12          0.05            0.25             0.7
head(port1$EOP.Weight)
##            daily.returns daily.returns.1 daily.returns.2
## 2015-01-05    0.05023157       0.2565699       0.6931985
## 2015-01-06    0.05004444       0.2471757       0.7027798
## 2015-01-07    0.05080700       0.2470435       0.7021495
## 2015-01-08    0.05068305       0.2501470       0.6991700
## 2015-01-09    0.04994503       0.2496409       0.7004140
## 2015-01-12    0.05036311       0.2511263       0.6985106
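
A useful by-product of the verbose output: the BOP/EOP weights let you approximate the turnover implied by rebalancing, since each day's trades move the prior end-of-day weights back to the targets. A sketch (the first row is NA by construction, as there is no prior day):

# Approximate daily turnover: sum of absolute weight changes at the rebalance
Turnover <- rowSums(abs(port1$BOP.Weight - lag.xts(port1$EOP.Weight)))
head(Turnover)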

4.1.14 Irregular Rebalancing

You can control the precise dates of rebalancing by supplying a weight matrix that contains a weight for every ticker on each rebalancing date (a month-end sketch follows the example below).

rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PortfolioAnalytics)
Tickers<-c('F','GE','CAT')
startd = "2015-01-01"
endd = "2021-04-30"
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "F"   "GE"  "CAT"
Prices = do.call(merge,lapply(Tickers, function(x) Ad(get(x))))
Prices = na.omit(Prices[-1,])
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-names(Prices)
weightmatrix<-Returns #stealing the data type and dates
T = dim(weightmatrix)[1] #Number of days
wF = .05
wGE = .25
wCAT = .70
weightmatrix$F.Adjusted<-rep(wF,T)
weightmatrix$GE.Adjusted<-rep(wGE,T)
weightmatrix$CAT.Adjusted<-rep(wCAT,T)
port1<-Return.portfolio(R=Returns, weights = weightmatrix, verbose = TRUE)
head(port1$BOP.Weight)
##            F.Adjusted GE.Adjusted CAT.Adjusted
## 2015-01-06       0.05        0.25          0.7
## 2015-01-07       0.05        0.25          0.7
## 2015-01-08       0.05        0.25          0.7
## 2015-01-09       0.05        0.25          0.7
## 2015-01-12       0.05        0.25          0.7
## 2015-01-13       0.05        0.25          0.7
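
The weight matrix above carries a weight for every trading day, which effectively rebalances daily. For genuinely irregular rebalancing, supply weights only on the dates you want to trade. A minimal sketch restricting the matrix above to month-ends (xts::endpoints() picks the month-end rows):

# Rebalance only at month-ends: keep just those rows of the weight matrix
RebalDates <- endpoints(Returns, on = "months")[-1] # drop the leading 0 index
weightmatrix2 <- weightmatrix[RebalDates, ]
port2 <- Return.portfolio(R = Returns, weights = weightmatrix2, verbose = TRUE)
head(port2$BOP.Weight)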

4.2 Optimal Portfolios

4.2.1 Min Variance-Long Only

Minimum Variance, Subject to Target Return, Fully Invested, Long Only

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers

### Construct Portfolio
# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Returns))

# Add a target return constraint
init.portf <- add.constraint(portfolio=init.portf, type="return", return_target=0.001)

# Add a full investment constraint such that the weights sum to 1
init.portf <- add.constraint(portfolio=init.portf, type="full_investment")

# Add a long only constraint such that the weight of an asset is between 0 and 1
init.portf <- add.constraint(portfolio=init.portf, type="long_only")

# Add objective to minimize portfolio variance
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "var")

### Solve Portfolio: Minimize Variance
library(ROI)
opt.MinVar <- optimize.portfolio(R=Returns, 
                                portfolio=init.portf, 
                                optimize_method="ROI", 
                                trace=TRUE)


summary(opt.MinVar) # The optimization summary should be read carefully since not every constraint is guaranteed to be satisfied
## **************************************************
## PortfolioAnalytics Optimization Summary 
## **************************************************
## 
## Call:
## optimize.portfolio(R = Returns, portfolio = init.portf, optimize_method = "ROI", 
##     trace = TRUE)
## 
## Optimal Weights:
##    IBM     GE      F   MSFT 
## 0.1718 0.0000 0.0972 0.7310 
## 
## Objective Measures:
##  StdDev 
## 0.01529 
## 
## 
## Portfolio Assets and Initial Weights:
##  IBM   GE    F MSFT 
## 0.25 0.25 0.25 0.25 
## 
## **************************************************
## PortfolioAnalytics Portfolio Specification 
## **************************************************
## 
## Call:
## portfolio.spec(assets = colnames(Returns))
## 
## Number of assets: 4 
## Asset Names
## [1] "IBM"  "GE"   "F"    "MSFT"
## 
## Constraints
## Enabled constraint types
##      - return 
##      - full_investment 
##      - long_only 
## 
## Objectives:
## Enabled objective names
##      - var 
## 
## ****************************************
## Constraints
## ****************************************
## Leverage Constraint:
## min_sum = 1
## max_sum = 1
## actual_leverage = 1
## 
## Box Constraints:
## min:
##  IBM   GE    F MSFT 
##    0    0    0    0 
## max:
##  IBM   GE    F MSFT 
##    1    1    1    1 
## 
## Position Limit Constraints:
## Maximum number of non-zero weights, max_pos:
## [1] "Unconstrained"
## Realized number of non-zero weights (i.e. positions):
## [1] 3
## 
## Maximum number of long positions, max_pos_long:
## [1] "Unconstrained"
## Realized number of long positions:
## [1] 3
## 
## Maximum number of short positions, max_pos_short:
## [1] "Unconstrained"
## Realized number of short positions:
## [1] 0
## 
## 
## Diversification Target Constraint:
## [1] "Unconstrained"
## 
## Realized diversification:
## [1] 0.426643
## 
## Turnover Target Constraint:
## [1] "Unconstrained"
## 
## Realized turnover from initial weights:
## [1] 0.2405153
## 
## ****************************************
## Objectives
## ****************************************
## 
## Objective: portfolio_risk_objective 
## $name
## [1] "var"
## 
## $target
## NULL
## 
## $arguments
## $arguments$portfolio_method
## [1] "single"
## 
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] 1
## 
## $call
## add.objective(portfolio = init.portf, type = "risk", name = "var")
## 
## attr(,"class")
## [1] "portfolio_risk_objective" "objective"               
## 
## ****************************************
## 
## Elapsed Time:
## Time difference of 0.0149951 secs
### Visualize result
library(PerformanceAnalytics)
# Optimal weights
OptWeight = extractWeights(opt.MinVar)
sum(OptWeight)
## [1] 1
chart.Weights(opt.MinVar)

# Portfolio returns
PortReturn <- Return.portfolio(R = Returns, 
                              weights = OptWeight, 
                              geometric = FALSE) # geometric = FALSE aggregates returns arithmetically
chart.TimeSeries(PortReturn)

mean(PortReturn)
## [1] 0.001
# Variance
var(PortReturn)
##                   portfolio.returns
## portfolio.returns      0.0002338933
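
As a sanity check, the reported risk can be reproduced from the optimal weights and the sample covariance matrix via the quadratic form w' Sigma w (close to, though not exactly equal to, var(PortReturn) above, since buy-and-hold weights drift between periods):

# Quadratic form w' Sigma w implied by the optimal weights
t(OptWeight) %*% cov(Returns) %*% OptWeight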

4.2.2 Min Variance-Long/Short

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers

### Construct Portfolio
# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Returns))

# Add a target return constraint
init.portf <- add.constraint(portfolio=init.portf, type="return", return_target=0.001)

# Add a full investment constraint such that the weights sum to 1
init.portf <- add.constraint(portfolio=init.portf, type="full_investment")

# Add objective to minimize portfolio variance
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "var")

### Solve Portfolio: Minimize Variance
library(ROI)
opt.MinVar <- optimize.portfolio(R=Returns, 
                                portfolio=init.portf, 
                                optimize_method="ROI", 
                                trace=TRUE)


summary(opt.MinVar) # The optimization summary should be read carefully since not every constraint is guaranteed to be satisfied
## **************************************************
## PortfolioAnalytics Optimization Summary 
## **************************************************
## 
## Call:
## optimize.portfolio(R = Returns, portfolio = init.portf, optimize_method = "ROI", 
##     trace = TRUE)
## 
## Optimal Weights:
##     IBM      GE       F    MSFT 
##  0.1923 -0.0286  0.1114  0.7249 
## 
## Objective Measures:
##  StdDev 
## 0.01528 
## 
## 
## Portfolio Assets and Initial Weights:
##  IBM   GE    F MSFT 
## 0.25 0.25 0.25 0.25 
## 
## **************************************************
## PortfolioAnalytics Portfolio Specification 
## **************************************************
## 
## Call:
## portfolio.spec(assets = colnames(Returns))
## 
## Number of assets: 4 
## Asset Names
## [1] "IBM"  "GE"   "F"    "MSFT"
## 
## Constraints
## Enabled constraint types
##      - return 
##      - full_investment 
## 
## Objectives:
## Enabled objective names
##      - var 
## 
## ****************************************
## Constraints
## ****************************************
## Leverage Constraint:
## min_sum = 1
## max_sum = 1
## actual_leverage = 1
## 
## Box Constraints:
## min:
## [1] -Inf -Inf -Inf -Inf
## max:
## [1] Inf Inf Inf Inf
## 
## Position Limit Constraints:
## Maximum number of non-zero weights, max_pos:
## [1] "Unconstrained"
## Realized number of non-zero weights (i.e. positions):
## [1] 4
## 
## Maximum number of long positions, max_pos_long:
## [1] "Unconstrained"
## Realized number of long positions:
## [1] 3
## 
## Maximum number of short positions, max_pos_short:
## [1] "Unconstrained"
## Realized number of short positions:
## [1] 1
## 
## 
## Diversification Target Constraint:
## [1] "Unconstrained"
## 
## Realized diversification:
## [1] 0.4243172
## 
## Turnover Target Constraint:
## [1] "Unconstrained"
## 
## Realized turnover from initial weights:
## [1] 0.2374475
## 
## ****************************************
## Objectives
## ****************************************
## 
## Objective: portfolio_risk_objective 
## $name
## [1] "var"
## 
## $target
## NULL
## 
## $arguments
## $arguments$portfolio_method
## [1] "single"
## 
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] 1
## 
## $call
## add.objective(portfolio = init.portf, type = "risk", name = "var")
## 
## attr(,"class")
## [1] "portfolio_risk_objective" "objective"               
## 
## ****************************************
## 
## Elapsed Time:
## Time difference of 0.01793098 secs
### Visualize result
library(PerformanceAnalytics)
# Optimal weights
OptWeight = extractWeights(opt.MinVar)
sum(OptWeight)
## [1] 1
chart.Weights(opt.MinVar)

# Portfolio returns
PortReturn <- Return.portfolio(R = Returns, 
                              weights = OptWeight, 
                              geometric = FALSE) # geometric = FALSE aggregates returns arithmetically
chart.TimeSeries(PortReturn)

mean(PortReturn)
## [1] 0.001
# Variance
var(PortReturn)
##                   portfolio.returns
## portfolio.returns      0.0002335734

4.2.3 Max Sharpe-LongOnly

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers

### Construct Portfolio
# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Returns))


# Add a full investment constraint such that the weights sum to 1
init.portf <- add.constraint(portfolio=init.portf, type="full_investment")


# Add return and risk objectives (both are needed to compute the Sharpe ratio)
init.portf <- add.objective(portfolio=init.portf, type="return", name = "mean")
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "StdDev")
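
# Caution: despite the section title, this example does not add a long_only
# constraint, so the ROI solver is free to short (GE receives a negative weight
# in the summary below). To enforce long-only, add, as in 4.2.1:
# init.portf <- add.constraint(portfolio=init.portf, type="long_only")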
### Solve Portfolio
library(ROI)
opt <- optimize.portfolio(R=Returns, 
                                portfolio=init.portf, 
                                optimize_method="ROI",
                                maxSR=TRUE,
                                trace=TRUE)


summary(opt) # The optimization summary should be read carefully since not every constraint is guaranteed to be satisfied
## **************************************************
## PortfolioAnalytics Optimization Summary 
## **************************************************
## 
## Call:
## optimize.portfolio(R = Returns, portfolio = init.portf, optimize_method = "ROI", 
##     trace = TRUE, maxSR = TRUE)
## 
## Optimal Weights:
##     IBM      GE       F    MSFT 
##  0.0218 -0.0804  0.0757  0.9830 
## 
## Objective Measures:
##  StdDev 
## 0.01733 
## 
## 
##     mean 
## 0.001288 
## 
## 
## Portfolio Assets and Initial Weights:
##  IBM   GE    F MSFT 
## 0.25 0.25 0.25 0.25 
## 
## **************************************************
## PortfolioAnalytics Portfolio Specification 
## **************************************************
## 
## Call:
## portfolio.spec(assets = colnames(Returns))
## 
## Number of assets: 4 
## Asset Names
## [1] "IBM"  "GE"   "F"    "MSFT"
## 
## Constraints
## Enabled constraint types
##      - full_investment 
## 
## Objectives:
## Enabled objective names
##      - mean 
##      - StdDev 
## 
## ****************************************
## Constraints
## ****************************************
## Leverage Constraint:
## min_sum = 1
## max_sum = 1
## actual_leverage = 1
## 
## Box Constraints:
## min:
## [1] -Inf -Inf -Inf -Inf
## max:
## [1] Inf Inf Inf Inf
## 
## Position Limit Constraints:
## Maximum number of non-zero weights, max_pos:
## [1] "Unconstrained"
## Realized number of non-zero weights (i.e. positions):
## [1] 4
## 
## Maximum number of long positions, max_pos_long:
## [1] "Unconstrained"
## Realized number of long positions:
## [1] 3
## 
## Maximum number of short positions, max_pos_short:
## [1] "Unconstrained"
## Realized number of short positions:
## [1] 1
## 
## 
## Diversification Target Constraint:
## [1] "Unconstrained"
## 
## Realized diversification:
## [1] 0.02113157
## 
## Turnover Target Constraint:
## [1] "Unconstrained"
## 
## Realized turnover from initial weights:
## [1] 0.3664756
## 
## ****************************************
## Objectives
## ****************************************
## 
## Objective: return_objective 
## $name
## [1] "mean"
## 
## $target
## NULL
## 
## $arguments
## list()
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] -1
## 
## $call
## add.objective(portfolio = init.portf, type = "return", name = "mean")
## 
## attr(,"class")
## [1] "return_objective" "objective"       
## 
## ****************************************
## Objective: portfolio_risk_objective 
## $name
## [1] "StdDev"
## 
## $target
## NULL
## 
## $arguments
## $arguments$portfolio_method
## [1] "single"
## 
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] 1
## 
## $call
## add.objective(portfolio = init.portf, type = "risk", name = "StdDev")
## 
## attr(,"class")
## [1] "portfolio_risk_objective" "objective"               
## 
## ****************************************
## 
## Elapsed Time:
## Time difference of 0.1982899 secs
### Visualize result
library(PerformanceAnalytics)
# Optimal weights
OptWeight = extractWeights(opt)
sum(OptWeight)
## [1] 1
chart.Weights(opt)

# Portfolio returns
PortReturn <- Return.portfolio(R = Returns, 
                              weights = OptWeight, 
                              geometric = FALSE) # geometric = FALSE aggregates returns arithmetically
chart.TimeSeries(PortReturn)

mean(PortReturn)
## [1] 0.001287984
# Variance
var(PortReturn)
##                   portfolio.returns
## portfolio.returns      0.0003003913

4.2.4 Max Sharpe-Long/Short

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers

### Construct Portfolio
# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Returns))


# Add a full investment constraint such that the weights sum to 1
init.portf <- add.constraint(portfolio=init.portf, type="full_investment")


# Add return and risk objectives (both are needed to compute the Sharpe ratio)
init.portf <- add.objective(portfolio=init.portf, type="return", name = "mean")
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "StdDev")
### Solve Portfolio
library(ROI)
opt <- optimize.portfolio(R=Returns, 
                                portfolio=init.portf, 
                                optimize_method="ROI",
                                maxSR=TRUE,
                                trace=TRUE)


summary(opt) # The optimization summary should be read carefully since not every constraint is guaranteed to be satisfied
## **************************************************
## PortfolioAnalytics Optimization Summary 
## **************************************************
## 
## Call:
## optimize.portfolio(R = Returns, portfolio = init.portf, optimize_method = "ROI", 
##     trace = TRUE, maxSR = TRUE)
## 
## Optimal Weights:
##     IBM      GE       F    MSFT 
##  0.0218 -0.0804  0.0757  0.9830 
## 
## Objective Measures:
##  StdDev 
## 0.01733 
## 
## 
##     mean 
## 0.001288 
## 
## 
## Portfolio Assets and Initial Weights:
##  IBM   GE    F MSFT 
## 0.25 0.25 0.25 0.25 
## 
## **************************************************
## PortfolioAnalytics Portfolio Specification 
## **************************************************
## 
## Call:
## portfolio.spec(assets = colnames(Returns))
## 
## Number of assets: 4 
## Asset Names
## [1] "IBM"  "GE"   "F"    "MSFT"
## 
## Constraints
## Enabled constraint types
##      - full_investment 
## 
## Objectives:
## Enabled objective names
##      - mean 
##      - StdDev 
## 
## ****************************************
## Constraints
## ****************************************
## Leverage Constraint:
## min_sum = 1
## max_sum = 1
## actual_leverage = 1
## 
## Box Constraints:
## min:
## [1] -Inf -Inf -Inf -Inf
## max:
## [1] Inf Inf Inf Inf
## 
## Position Limit Constraints:
## Maximum number of non-zero weights, max_pos:
## [1] "Unconstrained"
## Realized number of non-zero weights (i.e. positions):
## [1] 4
## 
## Maximum number of long positions, max_pos_long:
## [1] "Unconstrained"
## Realized number of long positions:
## [1] 3
## 
## Maximum number of short positions, max_pos_short:
## [1] "Unconstrained"
## Realized number of short positions:
## [1] 1
## 
## 
## Diversification Target Constraint:
## [1] "Unconstrained"
## 
## Realized diversification:
## [1] 0.0211316
## 
## Turnover Target Constraint:
## [1] "Unconstrained"
## 
## Realized turnover from initial weights:
## [1] 0.3664757
## 
## ****************************************
## Objectives
## ****************************************
## 
## Objective: return_objective 
## $name
## [1] "mean"
## 
## $target
## NULL
## 
## $arguments
## list()
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] -1
## 
## $call
## add.objective(portfolio = init.portf, type = "return", name = "mean")
## 
## attr(,"class")
## [1] "return_objective" "objective"       
## 
## ****************************************
## Objective: portfolio_risk_objective 
## $name
## [1] "StdDev"
## 
## $target
## NULL
## 
## $arguments
## $arguments$portfolio_method
## [1] "single"
## 
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] 1
## 
## $call
## add.objective(portfolio = init.portf, type = "risk", name = "StdDev")
## 
## attr(,"class")
## [1] "portfolio_risk_objective" "objective"               
## 
## ****************************************
## 
## Elapsed Time:
## Time difference of 0.207566 secs
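
As a point of comparison, with mean and StdDev objectives under optimize_method="ROI" the default (without maxSR=TRUE) is to maximize quadratic utility, so the weights generally differ from the max Sharpe solution; a minimal sketch reusing the specification above (opt.QU is a hypothetical name):

opt.QU <- optimize.portfolio(R=Returns, portfolio=init.portf, optimize_method="ROI", trace=TRUE) # default objective: quadratic utility
extractWeights(opt.QU) # compare with the max Sharpe weights above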
### Visualize result
library(PerformanceAnalytics)
# Optimal weights
OptWeight = extractWeights(opt)
sum(OptWeight)
## [1] 1
chart.Weights(opt)

# Portfolio returns
PortReturn <- Return.portfolio(R = Returns, 
                              weights = OptWeight, 
                              geometric = FALSE) # geometric = FALSE aggregates returns arithmetically
chart.TimeSeries(PortReturn)

mean(PortReturn)
## [1] 0.001287984
# Variance
var(PortReturn)
##                   portfolio.returns
## portfolio.returns      0.0003003911
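
The realized daily Sharpe ratio (assuming Rf = 0) follows directly from these two moments; PerformanceAnalytics also provides a helper:

mean(PortReturn)/sd(PortReturn) # daily Sharpe ratio with Rf = 0
SharpeRatio(PortReturn, Rf = 0, FUN = "StdDev") # same quantity via PerformanceAnalytics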

4.2.5 Max Sharpe-Weight Bounds

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers

### Construct Portfolio
# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Returns))


# Add a full investment constraint such that the weights sum to 1
init.portf <- add.constraint(portfolio=init.portf, type="full_investment")

# Add upper and lower bounds on the weights of assets
init.portf <- add.constraint(portfolio=init.portf, type="box", min=0, max=0.5)

# Add return and risk objectives (both are needed to compute the Sharpe ratio)
init.portf <- add.objective(portfolio=init.portf, type="return", name = "mean")
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "StdDev")
### Solve Portfolio
library(ROI)
opt <- optimize.portfolio(R=Returns, 
                                portfolio=init.portf, 
                                optimize_method="ROI",
                                maxSR=TRUE,
                                trace=TRUE)


summary(opt) # The optimization summary should be read carefully since not every constraint is guaranteed to be satisfied
## **************************************************
## PortfolioAnalytics Optimization Summary 
## **************************************************
## 
## Call:
## optimize.portfolio(R = Returns, portfolio = init.portf, optimize_method = "ROI", 
##     trace = TRUE, maxSR = TRUE)
## 
## Optimal Weights:
##    IBM     GE      F   MSFT 
## 0.3541 0.0000 0.1459 0.5000 
## 
## Objective Measures:
##  StdDev 
## 0.01426 
## 
## 
##      mean 
## 0.0007529 
## 
## 
## Portfolio Assets and Initial Weights:
##  IBM   GE    F MSFT 
## 0.25 0.25 0.25 0.25 
## 
## **************************************************
## PortfolioAnalytics Portfolio Specification 
## **************************************************
## 
## Call:
## portfolio.spec(assets = colnames(Returns))
## 
## Number of assets: 4 
## Asset Names
## [1] "IBM"  "GE"   "F"    "MSFT"
## 
## Constraints
## Enabled constraint types
##      - full_investment 
##      - box 
## 
## Objectives:
## Enabled objective names
##      - mean 
##      - StdDev 
## 
## ****************************************
## Constraints
## ****************************************
## Leverage Constraint:
## min_sum = 1
## max_sum = 1
## actual_leverage = 1
## 
## Box Constraints:
## min:
##  IBM   GE    F MSFT 
##    0    0    0    0 
## max:
##  IBM   GE    F MSFT 
##  0.5  0.5  0.5  0.5 
## 
## Position Limit Constraints:
## Maximum number of non-zero weights, max_pos:
## [1] "Unconstrained"
## Realized number of non-zero weights (i.e. positions):
## [1] 3
## 
## Maximum number of long positions, max_pos_long:
## [1] "Unconstrained"
## Realized number of long positions:
## [1] 3
## 
## Maximum number of short positions, max_pos_short:
## [1] "Unconstrained"
## Realized number of short positions:
## [1] 0
## 
## 
## Diversification Target Constraint:
## [1] "Unconstrained"
## 
## Realized diversification:
## [1] 0.6033234
## 
## Turnover Target Constraint:
## [1] "Unconstrained"
## 
## Realized turnover from initial weights:
## [1] 0.1770536
## 
## ****************************************
## Objectives
## ****************************************
## 
## Objective: return_objective 
## $name
## [1] "mean"
## 
## $target
## NULL
## 
## $arguments
## list()
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] -1
## 
## $call
## add.objective(portfolio = init.portf, type = "return", name = "mean")
## 
## attr(,"class")
## [1] "return_objective" "objective"       
## 
## ****************************************
## Objective: portfolio_risk_objective 
## $name
## [1] "StdDev"
## 
## $target
## NULL
## 
## $arguments
## $arguments$portfolio_method
## [1] "single"
## 
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] 1
## 
## $call
## add.objective(portfolio = init.portf, type = "risk", name = "StdDev")
## 
## attr(,"class")
## [1] "portfolio_risk_objective" "objective"               
## 
## ****************************************
## 
## Elapsed Time:
## Time difference of 0.1486819 secs
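
Since the summary should be read carefully, a quick programmatic check that the solution honors the box constraint is a useful habit; a minimal sketch (w is just a temporary name):

w <- extractWeights(opt)
all(w >= 0 & w <= 0.5) # TRUE if every weight respects the box constraint
abs(sum(w) - 1) < 1e-8 # TRUE if the portfolio is fully invested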
### Visualize result
library(PerformanceAnalytics)
# Optimal weights
OptWeight = extractWeights(opt)
sum(OptWeight)
## [1] 1
chart.Weights(opt)

# Portfolio returns
PortReturn <- Return.portfolio(R = Returns, 
                              weights = OptWeight, 
                              geometric = FALSE) # geometric = FALSE aggregates returns arithmetically
chart.TimeSeries(PortReturn)

mean(PortReturn)
## [1] 0.0007528538
# Variance
var(PortReturn)
##                   portfolio.returns
## portfolio.returns      0.0002033753
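
For reporting, the daily mean and variance can be annualized in one step; a minimal sketch using PerformanceAnalytics (geometric chaining by default):

table.AnnualizedReturns(PortReturn, Rf = 0)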

4.2.6 Max Sharpe-Limit #Positions

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers

### Construct Portfolio
# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Returns))


# Add a full investment constraint such that the weights sum to 1
init.portf <- add.constraint(portfolio=init.portf, type="full_investment")


# Add position limit constraint such that we have a maximum number of long positions and short positions
init.portf <- add.constraint(portfolio=init.portf, 
                             type="position_limit", 
                             max_pos_long = 3, 
                             max_pos_short = 2)

# Add return and risk objectives (both are needed to compute the Sharpe ratio)
init.portf <- add.objective(portfolio=init.portf, type="return", name = "mean")
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "StdDev")
### Solve Portfolio
library(ROI)
opt <- optimize.portfolio(R=Returns, 
                                portfolio=init.portf, 
                                optimize_method="ROI",
                                maxSR=TRUE,
                                trace=TRUE)


summary(opt) # The optimization summary should be read carefully since not every constraint is guaranteed to be satisfied
## **************************************************
## PortfolioAnalytics Optimization Summary 
## **************************************************
## 
## Call:
## optimize.portfolio(R = Returns, portfolio = init.portf, optimize_method = "ROI", 
##     trace = TRUE, maxSR = TRUE)
## 
## Optimal Weights:
##     IBM      GE       F    MSFT 
##  0.0218 -0.0804  0.0757  0.9830 
## 
## Objective Measures:
##  StdDev 
## 0.01733 
## 
## 
##     mean 
## 0.001288 
## 
## 
## Portfolio Assets and Initial Weights:
##  IBM   GE    F MSFT 
## 0.25 0.25 0.25 0.25 
## 
## **************************************************
## PortfolioAnalytics Portfolio Specification 
## **************************************************
## 
## Call:
## portfolio.spec(assets = colnames(Returns))
## 
## Number of assets: 4 
## Asset Names
## [1] "IBM"  "GE"   "F"    "MSFT"
## 
## Constraints
## Enabled constraint types
##      - full_investment 
##      - position_limit 
## 
## Objectives:
## Enabled objective names
##      - mean 
##      - StdDev 
## 
## ****************************************
## Constraints
## ****************************************
## Leverage Constraint:
## min_sum = 1
## max_sum = 1
## actual_leverage = 1
## 
## Box Constraints:
## min:
## [1] -Inf -Inf -Inf -Inf
## max:
## [1] Inf Inf Inf Inf
## 
## Position Limit Constraints:
## Maximum number of non-zero weights, max_pos:
## [1] "Unconstrained"
## Realized number of non-zero weights (i.e. positions):
## [1] 4
## 
## Maximum number of long positions, max_pos_long:
## [1] 3
## Realized number of long positions:
## [1] 3
## 
## Maximum number of short positions, max_pos_short:
## [1] 2
## Realized number of short positions:
## [1] 1
## 
## 
## Diversification Target Constraint:
## [1] "Unconstrained"
## 
## Realized diversification:
## [1] 0.02113155
## 
## Turnover Target Constraint:
## [1] "Unconstrained"
## 
## Realized turnover from initial weights:
## [1] 0.3664756
## 
## ****************************************
## Objectives
## ****************************************
## 
## Objective: return_objective 
## $name
## [1] "mean"
## 
## $target
## NULL
## 
## $arguments
## list()
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] -1
## 
## $call
## add.objective(portfolio = init.portf, type = "return", name = "mean")
## 
## attr(,"class")
## [1] "return_objective" "objective"       
## 
## ****************************************
## Objective: portfolio_risk_objective 
## $name
## [1] "StdDev"
## 
## $target
## NULL
## 
## $arguments
## $arguments$portfolio_method
## [1] "single"
## 
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] 1
## 
## $call
## add.objective(portfolio = init.portf, type = "risk", name = "StdDev")
## 
## attr(,"class")
## [1] "portfolio_risk_objective" "objective"               
## 
## ****************************************
## 
## Elapsed Time:
## Time difference of 0.2360849 secs
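
Note that the optimal weights match the unconstrained long/short solution in 4.2.4: with 3 long positions and 1 short, the limits (at most 3 long, at most 2 short) are not binding here. A minimal check on the realized position counts (w is just a temporary name):

w <- extractWeights(opt)
sum(w > 0) # number of long positions, should be <= 3
sum(w < 0) # number of short positions, should be <= 2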
### Visualize result
library(PerformanceAnalytics)
# Optimal weights
OptWeight = extractWeights(opt)
sum(OptWeight)
## [1] 1
chart.Weights(opt)

# Portfolio returns
PortReturn <- Return.portfolio(R = Returns, 
                              weights = OptWeight, 
                              geometric = FALSE) # geometric = FALSE aggregates returns arithmetically
chart.TimeSeries(PortReturn)

mean(PortReturn)
## [1] 0.001287984
# Variance
var(PortReturn)
##                   portfolio.returns
## portfolio.returns       0.000300391

4.2.7 Max Sharpe-FactorExposure

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Return = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Return = na.omit(Return[-1,])
colnames(Return)<-Tickers


### Estimate Market Risk Exposure (assume Rf = 0)
# Load market data (needed to estimate each asset's market risk exposure)
getSymbols("^GSPC", from = startd, to = endd)
## [1] "^GSPC"
SP500 <- periodReturn(Ad(get("GSPC")),period='daily',type='arithmetic') 
SP500 <- na.omit(SP500) # assign the cleaned series; an unassigned na.omit() call just prints every row
SP500 <- SP500[-1,] # remove the first row to align with the asset returns
names(SP500) <- "SP500" 
Betas <- sapply(colnames(Return), function(x) lm(Return[,x]~1+SP500)$coef[2]) # First pass: time-series regression of each asset on the market to estimate betas
MktExposure <- lm(colMeans(Return)~1+Betas)$coef[2] # Second pass: cross-sectional regression of mean asset returns on betas to estimate the market premium
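# Cross-check (a minimal sketch using the objects above): the OLS slope on a
# single regressor equals cov(asset, market)/var(market), so the first-pass
# betas can be verified without lm()
Betas.check <- apply(Return, 2, function(r) as.numeric(cov(r, SP500))/as.numeric(var(SP500)))
# all.equal(as.numeric(Betas), as.numeric(Betas.check)) should be TRUE up to numerical tolerance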
### Construct Portfolio
# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Return))


# Add a factor exposure constraint. This constraint lets the user set upper and lower
# bounds on exposures to risk factors; the exposures can be passed in as a vector or matrix.
# Here B is a vector of standardized asset betas, and the portfolio's market exposure is
# constrained to lie between 20% and 80% of the estimated market premium
init.portf <- add.constraint(portfolio=init.portf, 
                             type="factor_exposure",
                             B = scale(Betas),
                             lower=MktExposure*0.2, 
                             upper=MktExposure*0.8)
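# Note (a minimal sketch): scale() standardizes the betas, i.e. subtracts their
# mean and divides by their standard deviation; the result is what appears as the
# B matrix (with "scaled:center" and "scaled:scale" attributes) in the summary below
as.numeric(scale(Betas)) - (Betas - mean(Betas))/sd(Betas) # should be ~0 for every asset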

# Add risk and return objectives (both are needed to compute the Sharpe ratio)
init.portf <- add.objective(portfolio=init.portf, type="return", name = "mean")
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "StdDev")
### Solve Portfolio: Maximize Sharpe Ratio
# With optimize_method="ROI", the default when "mean" and "StdDev" are specified as objectives is to maximize quadratic utility. To maximize the Sharpe ratio instead, pass maxSR=TRUE to optimize.portfolio
library(ROI)
opt.MaxSR <- optimize.portfolio(R=Return, 
                                portfolio=init.portf, 
                                optimize_method="ROI", 
                                maxSR=TRUE,
                                trace=TRUE)
# Warning: Inf or -Inf values detected in box constraints, maximum return objectives must have finite box constraint values. 


summary(opt.MaxSR) # The optimization summary should be read carefully since not every constraint is guaranteed to be satisfied
## **************************************************
## PortfolioAnalytics Optimization Summary 
## **************************************************
## 
## Call:
## optimize.portfolio(R = Return, portfolio = init.portf, optimize_method = "ROI", 
##     trace = TRUE, maxSR = TRUE)
## 
## Optimal Weights:
##     IBM      GE       F    MSFT 
##  0.4256 -0.0625  0.1191  0.5178 
## 
## Objective Measures:
##  StdDev 
## 0.01446 
## 
## 
##      mean 
## 0.0007866 
## 
## 
## Portfolio Assets and Initial Weights:
##  IBM   GE    F MSFT 
## 0.25 0.25 0.25 0.25 
## 
## **************************************************
## PortfolioAnalytics Portfolio Specification 
## **************************************************
## 
## Call:
## portfolio.spec(assets = colnames(Return))
## 
## Number of assets: 4 
## Asset Names
## [1] "IBM"  "GE"   "F"    "MSFT"
## 
## Constraints
## Enabled constraint types
##      - factor_exposure 
## 
## Objectives:
## Enabled objective names
##      - mean 
##      - StdDev 
## 
## ****************************************
## Constraints
## ****************************************
## Leverage Constraint:
## min_sum = 1
## max_sum = 1
## actual_leverage = 1
## 
## Box Constraints:
## min:
## [1] -Inf -Inf -Inf -Inf
## max:
## [1] Inf Inf Inf Inf
## 
## Position Limit Constraints:
## Maximum number of non-zero weights, max_pos:
## [1] "Unconstrained"
## Realized number of non-zero weights (i.e. positions):
## [1] 4
## 
## Maximum number of long positions, max_pos_long:
## [1] "Unconstrained"
## Realized number of long positions:
## [1] 3
## 
## Maximum number of short positions, max_pos_short:
## [1] "Unconstrained"
## Realized number of short positions:
## [1] 1
## 
## 
## Diversification Target Constraint:
## [1] "Unconstrained"
## 
## Realized diversification:
## [1] 0.5326635
## 
## Turnover Target Constraint:
## [1] "Unconstrained"
## 
## Realized turnover from initial weights:
## [1] 0.221689
## 
## Factor Exposure Constraints:
## Factor Exposure B Matrix:
##               factor1
## IBM.SP500  -1.2658762
## GE.SP500    0.3171108
## F.SP500    -0.1763554
## MSFT.SP500  1.1251208
## attr(,"scaled:center")
## [1] 1.09136
## attr(,"scaled:scale")
## [1] 0.09927195
## 
## Lower bound on factor exposures, lower:
##      factor1 
## 0.0007585558 
## 
## Upper bound on group weights, upper:
##     factor1 
## 0.003034223 
## 
## Realized Factor Exposures:
##     factor1 
## 0.003034223 
## 
## 
## ****************************************
## Objectives
## ****************************************
## 
## Objective: return_objective 
## $name
## [1] "mean"
## 
## $target
## NULL
## 
## $arguments
## list()
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] -1
## 
## $call
## add.objective(portfolio = init.portf, type = "return", name = "mean")
## 
## attr(,"class")
## [1] "return_objective" "objective"       
## 
## ****************************************
## Objective: portfolio_risk_objective 
## $name
## [1] "StdDev"
## 
## $target
## NULL
## 
## $arguments
## $arguments$portfolio_method
## [1] "single"
## 
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] 1
## 
## $call
## add.objective(portfolio = init.portf, type = "risk", name = "StdDev")
## 
## attr(,"class")
## [1] "portfolio_risk_objective" "objective"               
## 
## ****************************************
## 
## Elapsed Time:
## Time difference of 0.219985 secs
### Visualize result
library(PerformanceAnalytics)
# Optimal weights
OptWeight = extractWeights(opt.MaxSR)
chart.Weights(opt.MaxSR)

# Sharpe ratio
SharpeRatio(Return, weights = OptWeight, FUN = "StdDev")
##                                     [,1]
## StdDev Sharpe (Rf=0%, p=95%): 0.05439818
# Portfolio returns
PortReturn <- Return.portfolio(R = Return, 
                              weights = OptWeight, 
                              geometric = FALSE) # geometric=FALSE aggregates returns arithmetically
chart.TimeSeries(PortReturn)
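# Cross-check (a minimal sketch): the Sharpe ratio implied by the realized
# portfolio return series; this is only approximate, since Return.portfolio
# lets the weights drift after inception
mean(PortReturn)/sd(PortReturn)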

4.2.8 Max Sharpe-Transaction

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Return = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Return = na.omit(Return[-1,])
colnames(Return)<-Tickers


### Construct Portfolio
# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Return))

# Relax the leverage constraint(min_sum = max_sum = 1, by default)
init.portf <- add.constraint(portfolio=init.portf, type="leverage", 
                             min_sum = 0.99, max_sum = 1.01)

# Simplify the optimization problem by setting long only constraint
init.portf <- add.constraint(portfolio=init.portf, type="long_only")


# Add a transaction cost constraint. This constraint lets the user specify proportional
# transaction costs, which are supported as a penalty by the global numeric solvers.
# Here the proportional transaction cost (ptc) is set to 0.1%
init.portf <- add.constraint(portfolio=init.portf, type="transaction_cost", ptc=0.001)


# Add risk and return objectives (both are needed to compute the Sharpe ratio)
init.portf <- add.objective(portfolio=init.portf, type="return", name = "mean")
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "StdDev")

### Solve Portfolio: Maximize Sharpe Ratio
# With optimize_method="ROI", the default when "mean" and "StdDev" are specified as objectives is to maximize quadratic utility.
# However, with the ROI solvers the proportional transaction cost constraint is currently only supported for the global minimum variance and quadratic utility problems (ROI quadprog plugin), so the default global solver DEoptim is used here instead
library(DEoptim)
opt.MaxSR <- optimize.portfolio(R=Return, 
                                portfolio=init.portf, 
                                maxSR=TRUE,
                                trace=TRUE)
## Iteration: 1 bestvalit: 0.013856 bestmemit:    0.368000    0.026000    0.256000    0.340000
## Iteration: 2 bestvalit: 0.013856 bestmemit:    0.368000    0.026000    0.256000    0.340000
## Iteration: 3 bestvalit: 0.013856 bestmemit:    0.368000    0.026000    0.256000    0.340000
## Iteration: 4 bestvalit: 0.013825 bestmemit:    0.350000    0.030000    0.230000    0.380000
## Iteration: 5 bestvalit: 0.013825 bestmemit:    0.350000    0.030000    0.230000    0.380000
## Iteration: 6 bestvalit: 0.013825 bestmemit:    0.350000    0.030000    0.230000    0.380000
## Iteration: 7 bestvalit: 0.013825 bestmemit:    0.350000    0.030000    0.230000    0.380000
## Iteration: 8 bestvalit: 0.013825 bestmemit:    0.350000    0.030000    0.230000    0.380000
## Iteration: 9 bestvalit: 0.013825 bestmemit:    0.350000    0.030000    0.230000    0.380000
## Iteration: 10 bestvalit: 0.013825 bestmemit:    0.350000    0.030000    0.230000    0.380000
## Iteration: 11 bestvalit: 0.013825 bestmemit:    0.350000    0.030000    0.230000    0.380000
## [1] 0.35 0.03 0.23 0.38
summary(opt.MaxSR) # The optimization summary should be read carefully since not every constraint is guaranteed to be satisfied
## **************************************************
## PortfolioAnalytics Optimization Summary 
## **************************************************
## 
## Call:
## optimize.portfolio(R = Return, portfolio = init.portf, trace = TRUE, 
##     maxSR = TRUE)
## 
## Optimal Weights:
##  IBM   GE    F MSFT 
## 0.35 0.03 0.23 0.38 
## 
## Objective Measures:
##     mean 
## 0.000615 
## 
## 
##  StdDev 
## 0.01397 
## 
## 
## Portfolio Assets and Initial Weights:
##  IBM   GE    F MSFT 
## 0.25 0.25 0.25 0.25 
## 
## **************************************************
## PortfolioAnalytics Portfolio Specification 
## **************************************************
## 
## Call:
## portfolio.spec(assets = colnames(Return))
## 
## Number of assets: 4 
## Asset Names
## [1] "IBM"  "GE"   "F"    "MSFT"
## 
## Constraints
## Enabled constraint types
##      - leverage 
##      - long_only 
##      - transaction_cost 
## 
## Objectives:
## Enabled objective names
##      - mean 
##      - StdDev 
## 
## ****************************************
## Constraints
## ****************************************
## Leverage Constraint:
## min_sum = 0.99
## max_sum = 1.01
## actual_leverage = 0.99
## 
## Box Constraints:
## min:
##  IBM   GE    F MSFT 
##    0    0    0    0 
## max:
##  IBM   GE    F MSFT 
##    1    1    1    1 
## 
## Position Limit Constraints:
## Maximum number of non-zero weights, max_pos:
## [1] "Unconstrained"
## Realized number of non-zero weights (i.e. positions):
## [1] 4
## 
## Maximum number of long positions, max_pos_long:
## [1] "Unconstrained"
## Realized number of long positions:
## [1] 4
## 
## Maximum number of short positions, max_pos_short:
## [1] "Unconstrained"
## Realized number of short positions:
## [1] 0
## 
## 
## Diversification Target Constraint:
## [1] "Unconstrained"
## 
## Realized diversification:
## [1] 0.6793
## 
## Turnover Target Constraint:
## [1] "Unconstrained"
## 
## Realized turnover from initial weights:
## [1] 0.1175
## 
## ****************************************
## Objectives
## ****************************************
## 
## Objective: return_objective 
## $name
## [1] "mean"
## 
## $target
## NULL
## 
## $arguments
## list()
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] -1
## 
## $call
## add.objective(portfolio = init.portf, type = "return", name = "mean")
## 
## attr(,"class")
## [1] "return_objective" "objective"       
## 
## ****************************************
## Objective: portfolio_risk_objective 
## $name
## [1] "StdDev"
## 
## $target
## NULL
## 
## $arguments
## $arguments$portfolio_method
## [1] "single"
## 
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] 1
## 
## $call
## add.objective(portfolio = init.portf, type = "risk", name = "StdDev")
## 
## attr(,"class")
## [1] "portfolio_risk_objective" "objective"               
## 
## ****************************************
## 
## Elapsed Time:
## Time difference of 1.535854 secs
### Visualize result
library(PerformanceAnalytics)
# Optimal weights
OptWeight = extractWeights(opt.MaxSR)
chart.Weights(opt.MaxSR)

# Sharpe ratio
SharpeRatio(Return, weights = OptWeight, FUN = "StdDev")
##                                     [,1]
## StdDev Sharpe (Rf=0%, p=95%): 0.04401871
# Portfolio returns
PortReturn <- Return.portfolio(R = Return, 
                              weights = OptWeight, 
                              geometric = FALSE) # geometric=FALSE aggregates returns arithmetically
chart.TimeSeries(PortReturn)
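# Gauge the cost drag of the initial trade (a minimal sketch, assuming the
# portfolio starts from the equal weights reported in the optimization summary)
InitWeight <- rep(1/length(OptWeight), length(OptWeight))
sum(abs(OptWeight - InitWeight)) * 0.001 # one-way turnover times the 0.1% proportional cost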

4.2.9 Max Sharpe-Reoptimizing

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Return = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Return = na.omit(Return[-1,])
colnames(Return)<-Tickers

### Construct Portfolio
# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Return))

# Add a full investment constraint such that the weights sum to 1
init.portf <- add.constraint(portfolio=init.portf, type="full_investment")


# Add risk and return objectives (both are needed to compute the Sharpe ratio)
init.portf <- add.objective(portfolio=init.portf, type="return", name = "mean")
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "StdDev")
### Solve Portfolio: Maximize Sharpe Ratio
# With optimize_method="ROI", the default when "mean" and "StdDev" are specified as objectives is to maximize quadratic utility. To maximize the Sharpe ratio instead, pass maxSR=TRUE to optimize.portfolio
library(ROI)
opt.MaxSR <- optimize.portfolio(R=Return, 
                                portfolio=init.portf, 
                                optimize_method="ROI", 
                                maxSR=TRUE,
                                trace=TRUE)
# Warning: Inf or -Inf values detected in box constraints, maximum return objectives must have finite box constraint values. 



summary(opt.MaxSR) # The optimization summary should be read carefully since not every constraint is guaranteed to be satisfied
## **************************************************
## PortfolioAnalytics Optimization Summary 
## **************************************************
## 
## Call:
## optimize.portfolio(R = Return, portfolio = init.portf, optimize_method = "ROI", 
##     trace = TRUE, maxSR = TRUE)
## 
## Optimal Weights:
##     IBM      GE       F    MSFT 
##  0.0218 -0.0804  0.0757  0.9830 
## 
## Objective Measures:
##  StdDev 
## 0.01733 
## 
## 
##     mean 
## 0.001288 
## 
## 
## Portfolio Assets and Initial Weights:
##  IBM   GE    F MSFT 
## 0.25 0.25 0.25 0.25 
## 
## **************************************************
## PortfolioAnalytics Portfolio Specification 
## **************************************************
## 
## Call:
## portfolio.spec(assets = colnames(Return))
## 
## Number of assets: 4 
## Asset Names
## [1] "IBM"  "GE"   "F"    "MSFT"
## 
## Constraints
## Enabled constraint types
##      - full_investment 
## 
## Objectives:
## Enabled objective names
##      - mean 
##      - StdDev 
## 
## ****************************************
## Constraints
## ****************************************
## Leverage Constraint:
## min_sum = 1
## max_sum = 1
## actual_leverage = 1
## 
## Box Constraints:
## min:
## [1] -Inf -Inf -Inf -Inf
## max:
## [1] Inf Inf Inf Inf
## 
## Position Limit Constraints:
## Maximum number of non-zero weights, max_pos:
## [1] "Unconstrained"
## Realized number of non-zero weights (i.e. positions):
## [1] 4
## 
## Maximum number of long positions, max_pos_long:
## [1] "Unconstrained"
## Realized number of long positions:
## [1] 3
## 
## Maximum number of short positions, max_pos_short:
## [1] "Unconstrained"
## Realized number of short positions:
## [1] 1
## 
## 
## Diversification Target Constraint:
## [1] "Unconstrained"
## 
## Realized diversification:
## [1] 0.02113148
## 
## Turnover Target Constraint:
## [1] "Unconstrained"
## 
## Realized turnover from initial weights:
## [1] 0.3664756
## 
## ****************************************
## Objectives
## ****************************************
## 
## Objective: return_objective 
## $name
## [1] "mean"
## 
## $target
## NULL
## 
## $arguments
## list()
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] -1
## 
## $call
## add.objective(portfolio = init.portf, type = "return", name = "mean")
## 
## attr(,"class")
## [1] "return_objective" "objective"       
## 
## ****************************************
## Objective: portfolio_risk_objective 
## $name
## [1] "StdDev"
## 
## $target
## NULL
## 
## $arguments
## $arguments$portfolio_method
## [1] "single"
## 
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] 1
## 
## $call
## add.objective(portfolio = init.portf, type = "risk", name = "StdDev")
## 
## attr(,"class")
## [1] "portfolio_risk_objective" "objective"               
## 
## ****************************************
## 
## Elapsed Time:
## Time difference of 0.208256 secs

Caution: The function is called “rebalancing,” but the implementation reoptimizes the weights at each rebalance date rather than rebalancing the portfolio back to fixed target weights.

### Solve Portfolio: Maximize Sharpe Ratio with Re-balancing
# Quarterly rebalancing with a 4-year (4*252 trading days) training period and a 1-year (252-day) rolling estimation window
opt.MaxSR.Rebal <- optimize.portfolio.rebalancing(
                                            R = Return, 
                                            portfolio = init.portf,
                                            optimize_method = 'ROI',
                                            maxSR=TRUE,
                                            trace = TRUE,
                                            rebalance_on = 'quarters',
                                            training_period = 4*252,
                                            rolling_window = 252)
# Warning: Inf or -Inf values detected in box constraints, maximum return objectives must have finite box constraint values. 


summary(opt.MaxSR.Rebal) 
## **************************************************
## PortfolioAnalytics Optimization with Rebalancing
## **************************************************
## 
## Call:
## optimize.portfolio.rebalancing(R = Return, portfolio = init.portf, 
##     optimize_method = "ROI", trace = TRUE, maxSR = TRUE, rebalance_on = "quarters", 
##     training_period = 4 * 252, rolling_window = 252)
## 
## First rebalance date:
## [1] "2019-03-29"
## 
## Last rebalance date:
## [1] "2021-04-29"
## 
## Annualized Portfolio Rebalancing Return:
## [1] 0.3164929
## 
## Annualized Portfolio Standard Deviation:
## [1] 0.3560959
## 
## Downside Risk Measures:
##                               portfolio.returns
## Semi Deviation                           0.0162
## Gain Deviation                           0.0160
## Loss Deviation                           0.0178
## Downside Deviation (MAR=210%)            0.0198
## Downside Deviation (Rf=0%)               0.0156
## Downside Deviation (0%)                  0.0156
## Maximum Drawdown                         0.3053
## Historical VaR (95%)                    -0.0312
## Historical ES (95%)                     -0.0519
## Modified VaR (95%)                      -0.0326
## Modified ES (95%)                       -0.0546
### Visualize result
library(PerformanceAnalytics)
# Optimal weights
OptWeight = extractWeights(opt.MaxSR)
OptWeight.Rebal = extractWeights(opt.MaxSR.Rebal)

chart.Weights(opt.MaxSR)

chart.Weights(opt.MaxSR.Rebal)

# Portfolio return
PortReturn <- Return.portfolio(R = Return, 
                              weights = OptWeight, 
                              geometric = FALSE) # geometric=FALSE aggregates returns arithmetically
PortReturn.Rebal <- Return.portfolio(R = Return, 
                              weights = OptWeight.Rebal, 
                              geometric = FALSE) 

Portfolio.R = cbind(PortReturn,PortReturn.Rebal)
names(Portfolio.R) = c("Non-Rebalancing","Rebalancing")

# First rebalance date: 2019-03-29
charts.PerformanceSummary(Portfolio.R, colors=c("darkred","steelblue"), main=" ") 
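For contrast with the reoptimization above, true calendar rebalancing back to a fixed weight vector can be sketched with the rebalance_on argument of Return.portfolio (a minimal sketch using the objects above):

# Rebalance back to the static optimal weights each quarter instead of reoptimizing
PortReturn.Fixed <- Return.portfolio(R = Return,
                                     weights = OptWeight,
                                     rebalance_on = "quarters",
                                     geometric = FALSE)
chart.TimeSeries(PortReturn.Fixed)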

4.2.10 Max Utility

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Return = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Return = na.omit(Return[-1,])
colnames(Return)<-Tickers


### Construct Portfolio
# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Return))

# Add a full investment constraint such that the weights sum to 1
init.portf <- add.constraint(portfolio=init.portf, type="full_investment")


# Add risk and return objectives (both are needed to compute the quadratic utility)
init.portf <- add.objective(portfolio=init.portf, type="return", name = "mean")
# A low risk aversion (0.25) puts little weight on the variance penalty, so the optimizer takes extreme long/short positions (see the weights below)
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "var", risk_aversion=.25)

### Solve Portfolio: Maximize Quadratic Utility
# With optimize_method="ROI", specifying "mean" and "var" (or "StdDev") objectives maximizes quadratic utility by default, i.e. mean minus (risk_aversion/2) times variance
library(ROI)
opt.MaxQU <- optimize.portfolio(R=Return, 
                                portfolio=init.portf, 
                                optimize_method="ROI", 
                                trace=TRUE)
# Warning: Inf or -Inf values detected in box constraints, maximum return objectives must have finite box constraint values. 


summary(opt.MaxQU) #The optimization summary should be read carefully since not every constraint is guaranteed to be satisfied
## **************************************************
## PortfolioAnalytics Optimization Summary 
## **************************************************
## 
## Call:
## optimize.portfolio(R = Return, portfolio = init.portf, optimize_method = "ROI", 
##     trace = TRUE)
## 
## Optimal Weights:
##     IBM      GE       F    MSFT 
## -5.3499 -1.7132 -1.0482  9.1113 
## 
## Objective Measures:
##    mean 
## 0.01036 
## 
## 
## StdDev 
## 0.1406 
## 
## 
## Portfolio Assets and Initial Weights:
##  IBM   GE    F MSFT 
## 0.25 0.25 0.25 0.25 
## 
## **************************************************
## PortfolioAnalytics Portfolio Specification 
## **************************************************
## 
## Call:
## portfolio.spec(assets = colnames(Return))
## 
## Number of assets: 4 
## Asset Names
## [1] "IBM"  "GE"   "F"    "MSFT"
## 
## Constraints
## Enabled constraint types
##      - full_investment 
## 
## Objectives:
## Enabled objective names
##      - mean 
##      - var 
## 
## ****************************************
## Constraints
## ****************************************
## Leverage Constraint:
## min_sum = 1
## max_sum = 1
## actual_leverage = 1
## 
## Box Constraints:
## min:
## [1] -Inf -Inf -Inf -Inf
## max:
## [1] Inf Inf Inf Inf
## 
## Position Limit Constraints:
## Maximum number of non-zero weights, max_pos:
## [1] "Unconstrained"
## Realized number of non-zero weights (i.e. positions):
## [1] 4
## 
## Maximum number of long positions, max_pos_long:
## [1] "Unconstrained"
## Realized number of long positions:
## [1] 1
## 
## Maximum number of short positions, max_pos_short:
## [1] "Unconstrained"
## Realized number of short positions:
## [1] 3
## 
## 
## Diversification Target Constraint:
## [1] "Unconstrained"
## 
## Realized diversification:
## [1] -114.6705
## 
## Turnover Target Constraint:
## [1] "Unconstrained"
## 
## Realized turnover from initial weights:
## [1] 4.43064
## 
## ****************************************
## Objectives
## ****************************************
## 
## Objective: return_objective 
## $name
## [1] "mean"
## 
## $target
## NULL
## 
## $arguments
## list()
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] -1
## 
## $call
## add.objective(portfolio = init.portf, type = "return", name = "mean")
## 
## attr(,"class")
## [1] "return_objective" "objective"       
## 
## ****************************************
## Objective: portfolio_risk_objective 
## $name
## [1] "var"
## 
## $target
## NULL
## 
## $arguments
## $arguments$portfolio_method
## [1] "single"
## 
## 
## $enabled
## [1] TRUE
## 
## $multiplier
## [1] 1
## 
## $risk_aversion
## [1] 0.25
## 
## $call
## add.objective(portfolio = init.portf, type = "risk", name = "var", 
##     risk_aversion = 0.25)
## 
## attr(,"class")
## [1] "portfolio_risk_objective" "objective"               
## 
## ****************************************
## 
## Elapsed Time:
## Time difference of 0.01096702 secs
### Visualize result
library(PerformanceAnalytics)
# Optimal weights
OptWeight = extractWeights(opt.MaxQU)
sum(OptWeight)
## [1] 1
chart.Weights(opt.MaxQU)

# Sharpe ratio
SharpeRatio(Return, weights = OptWeight, FUN = "StdDev")
##                                     [,1]
## StdDev Sharpe (Rf=0%, p=95%): 0.07368931
# Portfolio return
PortReturn <- Return.portfolio(R = Return, 
                              weights = OptWeight, 
                              geometric = FALSE) # geometric=FALSE aggregates returns arithmetically
chart.TimeSeries(PortReturn)
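Because the extreme weights above stem from the very low risk aversion, re-solving with a larger value is an instructive sensitivity check (a minimal sketch; risk_aversion = 10 is an arbitrary illustrative choice, and the resulting weights should be markedly less extreme):

# Same specification with a higher risk aversion
sens.portf <- portfolio.spec(assets = colnames(Return))
sens.portf <- add.constraint(portfolio=sens.portf, type="full_investment")
sens.portf <- add.objective(portfolio=sens.portf, type="return", name = "mean")
sens.portf <- add.objective(portfolio=sens.portf, type="risk", name = "var", risk_aversion=10)
opt.MaxQU10 <- optimize.portfolio(R=Return, portfolio=sens.portf, optimize_method="ROI", trace=TRUE)
extractWeights(opt.MaxQU10)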

4.2.11 Efficient Frontier

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
### Load Data
library(quantmod)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
### Compute Returns
Return = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Return = na.omit(Return[-1,])
colnames(Return)<-Tickers


# Create the portfolio specification
library(PortfolioAnalytics)
init.portf <-portfolio.spec(assets = colnames(Return))

# Add a full investment constraint such that the weights sum to 1
init.portf <- add.constraint(portfolio=init.portf, type="full_investment")


# Add risk and return objectives (needed to compute the mean-StdDev frontier and the Sharpe ratio)
init.portf <- add.objective(portfolio=init.portf, type="return", name = "mean")
init.portf <- add.objective(portfolio=init.portf, type="risk", name = "StdDev")

### Efficient Frontier
MeanSd.EF <- create.EfficientFrontier(
                      R = Return,
                      portfolio = init.portf,
                      type = "mean-sd",
                      n.portfolios = 50)
summary(MeanSd.EF)
## **************************************************
## PortfolioAnalytics Efficient Frontier 
## **************************************************
## 
## Call:
## create.EfficientFrontier(R = Return, portfolio = init.portf, 
##     type = "mean-sd", n.portfolios = 50)
## 
## Efficient Frontier Points: 50 
## 
## Weights along the efficient frontier:
##      IBM     GE     F  MSFT
## 1  0.443  0.048 0.164 0.345
## 2  0.434  0.045 0.162 0.358
## 3  0.426  0.042 0.160 0.371
## 4  0.417  0.040 0.158 0.384
## 5  0.409  0.037 0.157 0.398
## 6  0.400  0.035 0.155 0.411
## 7  0.391  0.032 0.153 0.424
## 8  0.383  0.029 0.151 0.437
## 9  0.374  0.027 0.149 0.450
## 10 0.366  0.024 0.148 0.463
## 11 0.357  0.021 0.146 0.476
## 12 0.348  0.019 0.144 0.489
## 13 0.340  0.016 0.142 0.502
## 14 0.331  0.014 0.140 0.515
## 15 0.323  0.011 0.139 0.528
## 16 0.314  0.008 0.137 0.541
## 17 0.306  0.006 0.135 0.554
## 18 0.297  0.003 0.133 0.567
## 19 0.288  0.001 0.131 0.580
## 20 0.280 -0.002 0.130 0.593
## 21 0.271 -0.005 0.128 0.606
## 22 0.263 -0.007 0.126 0.619
## 23 0.254 -0.010 0.124 0.632
## 24 0.245 -0.012 0.122 0.645
## 25 0.237 -0.015 0.121 0.658
## 26 0.228 -0.018 0.119 0.671
## 27 0.220 -0.020 0.117 0.684
## 28 0.211 -0.023 0.115 0.697
## 29 0.202 -0.026 0.113 0.710
## 30 0.194 -0.028 0.112 0.723
## 31 0.185 -0.031 0.110 0.736
## 32 0.177 -0.033 0.108 0.749
## 33 0.168 -0.036 0.106 0.762
## 34 0.159 -0.039 0.104 0.775
## 35 0.151 -0.041 0.103 0.788
## 36 0.142 -0.044 0.101 0.801
## 37 0.134 -0.046 0.099 0.814
## 38 0.125 -0.049 0.097 0.827
## 39 0.116 -0.052 0.095 0.840
## 40 0.108 -0.054 0.094 0.853
## 41 0.099 -0.057 0.092 0.866
## 42 0.091 -0.060 0.090 0.879
## 43 0.082 -0.062 0.088 0.892
## 44 0.073 -0.065 0.086 0.905
## 45 0.065 -0.067 0.085 0.918
## 46 0.056 -0.070 0.083 0.931
## 47 0.048 -0.073 0.081 0.944
## 48 0.039 -0.075 0.079 0.957
## 49 0.030 -0.078 0.078 0.970
## 50 0.022 -0.080 0.076 0.983
## 
## Risk and return metrics along the efficient frontier:
##     mean StdDev out
## 1  0.001  0.014   0
## 2  0.001  0.014   0
## 3  0.001  0.014   0
## 4  0.001  0.014   0
## 5  0.001  0.014   0
## 6  0.001  0.014   0
## 7  0.001  0.014   0
## 8  0.001  0.014   0
## 9  0.001  0.014   0
## 10 0.001  0.014   0
## 11 0.001  0.014   0
## 12 0.001  0.014   0
## 13 0.001  0.014   0
## 14 0.001  0.014   0
## 15 0.001  0.014   0
## 16 0.001  0.014   0
## 17 0.001  0.014   0
## 18 0.001  0.014   0
## 19 0.001  0.015   0
## 20 0.001  0.015   0
## 21 0.001  0.015   0
## 22 0.001  0.015   0
## 23 0.001  0.015   0
## 24 0.001  0.015   0
## 25 0.001  0.015   0
## 26 0.001  0.015   0
## 27 0.001  0.015   0
## 28 0.001  0.015   0
## 29 0.001  0.015   0
## 30 0.001  0.015   0
## 31 0.001  0.015   0
## 32 0.001  0.015   0
## 33 0.001  0.016   0
## 34 0.001  0.016   0
## 35 0.001  0.016   0
## 36 0.001  0.016   0
## 37 0.001  0.016   0
## 38 0.001  0.016   0
## 39 0.001  0.016   0
## 40 0.001  0.016   0
## 41 0.001  0.016   0
## 42 0.001  0.016   0
## 43 0.001  0.017   0
## 44 0.001  0.017   0
## 45 0.001  0.017   0
## 46 0.001  0.017   0
## 47 0.001  0.017   0
## 48 0.001  0.017   0
## 49 0.001  0.017   0
## 50 0.001  0.017   0
# Plot
chart.EfficientFrontier(MeanSd.EF,
                        match.col="StdDev",      # which column to use for risk
                        RAR.text="Sharpe Ratio", # string name for risk adjusted return text
                        rf=0,                    # risk free rate
                        tangent.line = TRUE,
                        type="l",                # plot line type
                        chart.assets=TRUE,
                        labels.assets=TRUE)
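The frontier points can also be inspected programmatically. Assuming the returned object stores them in MeanSd.EF$frontier with "mean" and "StdDev" columns (consistent with the summary above), the maximum-Sharpe point along the frontier is (a minimal sketch):

EF <- MeanSd.EF$frontier
SR <- EF[, "mean"]/EF[, "StdDev"] # Sharpe ratio (Rf = 0) at each frontier point
EF[which.max(SR), ]               # frontier portfolio with the highest Sharpe ratio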

4.2.12 Fwd Looking Min Variance

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PerformanceAnalytics)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers

#Calculate mean returns and covariance
MeanRet <- sapply(Returns,FUN=mean)
CovRet <- cov(Returns)
#Set expected returns 5% lower than historical avg.
ExpRet <- MeanRet*(1-.05)
#Set variances 5% higher, but covariances the same
ScaledVar <- diag(CovRet)*1.05
ExpCov <- CovRet
diag(ExpCov) <-ScaledVar
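# Sanity check (a minimal sketch): inflating only the diagonal keeps the
# expected covariance matrix positive definite, so the optimization remains well-posed
min(eigen(ExpCov, symmetric = TRUE, only.values = TRUE)$values) > 0 # should be TRUE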

#install.packages("IntroCompFinR", repos="http://R-Forge.R-project.org")
library(IntroCompFinR) 

# Minimize expected variance subject to a target return (long only)
Target <- mean(MeanRet)
opt.MinVar <- efficient.portfolio(er = ExpRet,
                                  cov.mat = ExpCov,
                                  target.return = Target,
                                  shorts = FALSE)
opt.MinVar
## Call:
## efficient.portfolio(er = ExpRet, cov.mat = ExpCov, target.return = Target, 
##     shorts = FALSE)
## 
## Portfolio expected return:     0.0004270109 
## Portfolio standard deviation:  0.01432522 
## Portfolio weights:
##    IBM     GE      F   MSFT 
## 0.5049 0.0795 0.1820 0.2336
barplot(opt.MinVar$weights,las=2,cex.name=.6,main="Min Variance Weights",ylim=c(0,.6)) # ylim widened so the largest weight (~0.50) is not clipped

Return.MinVar <- Return.portfolio(R=Returns, weights=opt.MinVar$weights, geometric=F) 
charts.PerformanceSummary(Return.MinVar)
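# Verification (a minimal sketch): the optimized weights should reproduce the
# target expected return exactly
sum(opt.MinVar$weights * ExpRet) # should equal Target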

4.2.13 Fwd Looking Max Sharpe

### Housekeeping
rm(list=ls()) # clear workspace
cat("\014")  # clear console
library(quantmod)
library(PerformanceAnalytics)
startd = "2015-01-01"
endd = "2021-04-30"
Tickers = c("IBM","GE","F","MSFT")
getSymbols(Tickers,from=startd,to=endd,src='yahoo')
## [1] "IBM"  "GE"   "F"    "MSFT"
Returns = do.call(merge,lapply(Tickers, function(x) 
        periodReturn(Ad(get(x)),period='daily',type='arithmetic')))
Returns = na.omit(Returns[-1,])
colnames(Returns)<-Tickers

#Calculate mean returns and covariance
MeanRet <- sapply(Returns,FUN=mean)
CovRet <- cov(Returns)
#Set expected returns 5% lower than historical avg.
ExpRet <- MeanRet*(1-.05)
#Set variances 5% higher, but covariances the same
ScaledVar <- diag(CovRet)*1.05
ExpCov <- CovRet
diag(ExpCov) <-ScaledVar

#install.packages("IntroCompFinR", repos="http://R-Forge.R-project.org")
library(IntroCompFinR) 

# Maximize expected Sharpe Ratio 
ExpRiskFree <- 0.75/100/30 # per-period (daily) risk-free rate; must match the frequency of the return data
opt.MaxSR <- tangency.portfolio(er = ExpRet, 
                                cov.mat = ExpCov, 
                                risk.free = ExpRiskFree,
                                shorts = FALSE)
opt.MaxSR
## Call:
## tangency.portfolio(er = ExpRet, cov.mat = ExpCov, risk.free = ExpRiskFree, 
##     shorts = FALSE)
## 
## Portfolio expected return:     0.001223585 
## Portfolio standard deviation:  0.01784839 
## Portfolio weights:
##  IBM   GE    F MSFT 
##    0    0    0    1
barplot(opt.MaxSR$weights,las=2,cex.name=.6,main="Max Sharpe Ratio Weights",ylim=c(0,1)) # ylim set to 1 so the 100% MSFT weight is fully visible

Return.MaxSR <- Return.portfolio(R=Returns, weights=opt.MaxSR$weights, geometric=F) 
charts.PerformanceSummary(Return.MaxSR)
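The expected Sharpe ratio of the tangency portfolio follows directly from the reported moments (a minimal sketch; it assumes the IntroCompFinR portfolio object exposes er and sd elements, consistent with the weights element used above):

(opt.MaxSR$er - ExpRiskFree)/opt.MaxSR$sd # expected Sharpe ratio of the tangency portfolio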