```
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
from scipy.stats import norm
```

```
# Load prices and compute daily log returns for the S&P 500 and US Treasury series
df = pd.read_excel('VaR3.xlsx')
df['SP_return'] = np.log(1 + df['SP_Close'].pct_change(periods=1))
df['US_return'] = np.log(1 + df['US_Close'].pct_change(periods=1))
df = df.dropna()
df = df.reset_index(drop=True)
# Net asset value of an equally weighted portfolio, each leg normalised to 1 at t = 0
df['nav'] = (df['SP_Close']/df['SP_Close'][0] + df['US_Close']/df['US_Close'][0])/2
df.head()
```

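For reference, the two derived series are the daily log return and the net asset value of a portfolio equally weighted at time 0:

$$
r_t = \ln\frac{P_t}{P_{t-1}}, \qquad
\mathrm{nav}_t = \frac{1}{2}\left(\frac{P^{SP}_t}{P^{SP}_0} + \frac{P^{US}_t}{P^{US}_0}\right)
$$
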
```
# Gaussian negative log-likelihood of an NGARCH(1,1) model
def getNegativeLoglikelihood3(params, r):
    omega, alpha, beta, theta = params
    sigma2 = np.ones(len(r))
    sigma2[0] = np.var(r)          # initialise sigma^2_0 with the sample variance
    for i in range(1, len(r)):
        s = omega + alpha * (r[i - 1] - theta*sigma2[i-1]**0.5)**2 + beta * sigma2[i - 1]
        sigma2[i] = (s > 0) * s + (s < 0)*100   # replace a negative variance by a large penalty value
    LogLikeLihood = (np.log(2*np.pi) + np.log(sigma2) + r**2/sigma2).sum()/2
    return LogLikeLihood
```

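The function returns the Gaussian negative log-likelihood of an NGARCH(1,1) model, with variance recursion and objective

$$
\sigma_t^2 = \omega + \alpha\,(r_{t-1} - \theta\,\sigma_{t-1})^2 + \beta\,\sigma_{t-1}^2, \qquad
-\ln L = \frac{1}{2}\sum_t\left[\ln(2\pi) + \ln\sigma_t^2 + \frac{r_t^2}{\sigma_t^2}\right],
$$

where $\sigma_0^2$ is set to the sample variance of the returns.
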
```
# SP500: estimate the NGARCH parameters by maximum likelihood (Nelder-Mead simplex)
params_MLE2 = optimize.fmin(func=getNegativeLoglikelihood3,
                            x0=np.array([0.0000015, 0.05, 0.8, 1.25]),
                            args=(df['SP_return'],),
                            ftol=0.00001)
params_MLE2
```

```
omega, alpha, beta, theta = params_MLE2
df['SP_sigma2'] = np.var(df['SP_return'])   # initialise sigma^2_0 with the sample variance

# method 1: fill the conditional variance column with an explicit loop over rows
for i in range(1, df.shape[0]):
    df.loc[i, 'SP_sigma2'] = omega + alpha * (df.loc[i - 1, 'SP_return'] - theta * df.loc[i - 1, 'SP_sigma2']**0.5)**2 + beta * df.loc[i - 1, 'SP_sigma2']

# method 2: the same recursion wrapped in a helper function
# (a row-wise apply cannot be used here: sigma^2_t depends on the sigma^2_{t-1} computed in the previous step)
def cal_sigma(r_prev, sigma2_prev):
    return omega + alpha * (r_prev - theta * sigma2_prev**0.5)**2 + beta * sigma2_prev

for i in range(1, df.shape[0]):
    df.loc[i, 'SP_sigma2'] = cal_sigma(df.loc[i - 1, 'SP_return'], df.loc[i - 1, 'SP_sigma2'])
```

```
# US Treasury: estimate the NGARCH parameters for the bond return series
params_MLE2_US = optimize.fmin(func=getNegativeLoglikelihood3,
                               x0=np.array([0.000005, 0.03, 0.97, 0]),
                               args=(df['US_return'],),
                               ftol=0.00001)
params_MLE2_US
```

```
omega, alpha, beta, theta = params_MLE2_US
df['US_sigma2'] = np.var(df['US_return'])
for i in range(1, df.shape[0]):
    df.loc[i, 'US_sigma2'] = omega + alpha * (df.loc[i - 1, 'US_return'] - theta * df.loc[i - 1, 'US_sigma2']**0.5)**2 + beta * df.loc[i - 1, 'US_sigma2']

# standardized residuals, the inputs to the correlation models below
df['SP_z'] = df['SP_return']/df['SP_sigma2']**0.5
df['US_z'] = df['US_return']/df['US_sigma2']**0.5
```

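The standardized residuals $z_t = r_t/\sigma_t$ feed the second estimation step: the correlation dynamics below are fitted on $z$ alone, in the usual two-step DCC fashion.
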

```
# Negative log-likelihood of the correlation part of an exponential-smoother (RiskMetrics-style) DCC model
def getNegLoglikelihood5(param, z1, z2):
    lambd = param[0]
    if lambd > 1:
        return 10000          # penalty: keep the smoothing parameter in a valid range

    q11 = np.ones(len(z1))
    q12 = np.ones(len(z1))
    q12[0] = np.mean(z1*z2)
    q22 = np.ones(len(z1))

    rho = np.ones(len(z1))
    rho[0] = q12[0]/np.sqrt(q11[0]*q22[0])

    for i in range(1, len(z1)):
        q11[i] = (1 - lambd)*z1[i-1]**2 + lambd*q11[i-1]
        q12[i] = (1 - lambd)*z1[i-1]*z2[i-1] + lambd*q12[i-1]
        q22[i] = (1 - lambd)*z2[i-1]**2 + lambd*q22[i-1]
        rho[i] = q12[i]/np.sqrt(q11[i]*q22[i])

    LogLikeLihood = (np.log(1 - rho**2) + (z1**2 + z2**2 - 2*rho*z1*z2)/(1 - rho**2)).sum()*0.5
    return LogLikeLihood
```

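The correlation dynamics in `getNegLoglikelihood5` are an exponential smoother of the cross-products of the standardized residuals, driven by a single persistence parameter $\lambda$:

$$
q_{ij,t} = (1-\lambda)\,z_{i,t-1}z_{j,t-1} + \lambda\,q_{ij,t-1}, \qquad
\rho_t = \frac{q_{12,t}}{\sqrt{q_{11,t}\,q_{22,t}}}
$$
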
```
# Parameter estimation
params_MLE5 = optimize.fmin(getNegLoglikelihood5, 0.94,
                            args=(df['SP_z'], df['US_z']), ftol=0.000000001)
params_MLE5

# filter the conditional correlation through the sample
lambd = params_MLE5[0]
df['q11'] = 1
df['q12'] = np.mean(df.SP_z*df.US_z)
df['q22'] = 1

for i in range(1, df.shape[0]):
    df.loc[i, 'q11'] = (1 - lambd)*df.SP_z[i-1]**2 + lambd*df.loc[i-1, 'q11']
    df.loc[i, 'q12'] = (1 - lambd)*df.SP_z[i-1]*df.US_z[i-1] + lambd*df.loc[i-1, 'q12']
    df.loc[i, 'q22'] = (1 - lambd)*df.US_z[i-1]**2 + lambd*df.loc[i-1, 'q22']

df['cor_dcc'] = df.q12/((df.q11*df.q22)**0.5)

# 1% one-day VaR of the 50/50 portfolio
df['VaR_DCC'] = -norm(0, 1).ppf(0.01)*(df['SP_sigma2']*0.5**2 + df['US_sigma2']*0.5**2 +
                                       2*0.5*0.5*df['cor_dcc']*(df['SP_sigma2']**0.5)*(df['US_sigma2']**0.5))**0.5
```

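The VaR line applies the normal quantile to the conditional standard deviation of a 50/50 portfolio:

$$
\mathrm{VaR}_t^{1\%} = -\Phi^{-1}(0.01)\sqrt{w_1^2\sigma_{1,t}^2 + w_2^2\sigma_{2,t}^2 + 2\,w_1 w_2\,\rho_t\,\sigma_{1,t}\sigma_{2,t}}, \qquad w_1 = w_2 = 0.5
$$
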

```
# Negative log-likelihood of the correlation part of an Engle DCC(1,1)-GARCH model with correlation targeting
def getNegLoglikelihood6(params, z1, z2):
    alpha, beta = params
    if alpha + beta > 1:
        return 10000          # penalty: enforce stationarity of the correlation process

    q11 = np.ones(len(z1))
    q12 = np.ones(len(z1))
    q12[0] = np.mean(z1*z2)
    q22 = np.ones(len(z1))

    rho = np.ones(len(z1))
    rho[0] = q12[0]/np.sqrt(q11[0]*q22[0])

    rho_mean = np.mean(z1*z2)   # unconditional (target) correlation of the standardized residuals
    for i in range(1, len(z1)):
        q11[i] = 1 + alpha*((z1[i-1]**2) - 1) + beta*(q11[i-1] - 1)
        q12[i] = rho_mean + alpha*(z1[i-1]*z2[i-1] - rho_mean) + beta*(q12[i-1] - rho_mean)
        q22[i] = 1 + alpha*((z2[i-1]**2) - 1) + beta*(q22[i-1] - 1)
        rho[i] = q12[i]/np.sqrt(q11[i]*q22[i])

    LogLikeLihood = (np.log(1 - rho**2) + (z1**2 + z2**2 - 2*rho*z1*z2)/(1 - rho**2)).sum()*0.5
    return LogLikeLihood
```

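Compared with the exponential smoother, this DCC(1,1) specification mean-reverts each $q_{ij}$ to its unconditional target ($1$ on the diagonal, $\bar\rho = \mathrm{E}[z_1 z_2]$ off the diagonal), so only $\alpha$ and $\beta$ are estimated:

$$
q_{12,t} = \bar\rho + \alpha\,(z_{1,t-1}z_{2,t-1} - \bar\rho) + \beta\,(q_{12,t-1} - \bar\rho), \qquad
q_{ii,t} = 1 + \alpha\,(z_{i,t-1}^2 - 1) + \beta\,(q_{ii,t-1} - 1)
$$
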
```
# Parameter estimation
params_MLE6 = optimize.fmin(getNegLoglikelihood6, np.array([0.05, 0.9]),
                            args=(df['SP_z'], df['US_z']), ftol=0.000000001)
params_MLE6

# conditional correlation
alpha_dcc, beta_dcc = params_MLE6
df['q11_garch'] = 1
df['q12_garch'] = np.mean(df.SP_z*df.US_z)
df['q22_garch'] = 1
rho_mean = np.mean(df.SP_z*df.US_z)

for i in range(1, df.shape[0]):
    df.loc[i, 'q11_garch'] = 1 + alpha_dcc*((df.SP_z[i-1]**2) - 1) + beta_dcc*(df.loc[i-1, 'q11_garch'] - 1)
    df.loc[i, 'q12_garch'] = rho_mean + alpha_dcc*(df.SP_z[i-1]*df.US_z[i-1] - rho_mean) + \
                             beta_dcc*(df.loc[i-1, 'q12_garch'] - rho_mean)
    df.loc[i, 'q22_garch'] = 1 + alpha_dcc*((df.US_z[i-1]**2) - 1) + beta_dcc*(df.loc[i-1, 'q22_garch'] - 1)

df['cor_dcc_garch'] = df.q12_garch/((df.q11_garch*df.q22_garch)**0.5)

# 1% one-day VaR of the 50/50 portfolio under the DCC-GARCH correlation
df['VaR_DCC_Garch'] = -norm(0, 1).ppf(0.01)*(df['SP_sigma2']*0.5**2 + df['US_sigma2']*0.5**2 +
                                             2*0.5*0.5*df['cor_dcc_garch']*(df['SP_sigma2']**0.5)*(df['US_sigma2']**0.5))**0.5
```

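A minimal sketch to eyeball the two VaR series against the realized portfolio return (the `port_return` column is introduced here only for this plot; the other columns are the ones computed above):

```
# Approximate portfolio log return as the equally weighted sum of the two legs (illustration only)
df['port_return'] = 0.5*df['SP_return'] + 0.5*df['US_return']

plt.figure(figsize=(10, 5))
plt.plot(df['port_return'], color='grey', alpha=0.5, label='portfolio return')
plt.plot(-df['VaR_DCC'], label='-VaR, exponential-smoother DCC')
plt.plot(-df['VaR_DCC_Garch'], label='-VaR, DCC-GARCH')
plt.legend()
plt.title('1% one-day VaR vs. realized portfolio return')
plt.show()
```
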
