[关闭]
@semprelibera 2017-06-05T09:20:48.000000Z 字数 30121 阅读 785

附录A 相关代码

Thesis


一、 抓取数据

在PEER钢筋混凝土柱试验数据库中抓取试验信息

1.1 抓取并存储矩形截面柱子的数据

文件名为:scra.py

import re
import requests
import ssl
from pandas import DataFrame
# The target site uses HTTPS; to avoid certificate-verification failures the
# verification is disabled here and the resulting urllib3 warnings silenced.
ssl._create_default_https_context = ssl._create_unverified_context
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
#############################################
# Step 1: define getlinkfile, which collects #
# every hyperlink on the search-result page  #
#############################################
  13. def getlinkfile(url):
  14. # 这是用来保存结果的list
  15. res = []
  16. # 读取网页源代码,存储在r中
  17. r = requests.get(url,verify = False)
  18. # 检验读取是否成功
  19. try:
  20. r.raise_for_status()
  21. except Exception as exc:
  22. print('There was a problem: %s' % (exc))
  23. # 把网页的内容以纯文本的形式存储到变量data中,data的类型为string
  24. data = r.text
  25. # 定义正则表达式,用来匹配网页上的超级链接
  26. hrefRegEx = re.compile(r"(?<=href=\").+?(?=\")|(?<=href=\').+?(?=\')")
  27. # 匹配所有的超级链接,结果存储在link_list中
  28. link_list = hrefRegEx.findall(data)
  29. #mylinkfile = open('linkfile.txt','w')
  30. # 筛选掉我们不需要的超级链接
  31. for url in link_list:
  32. if 'xml&id' not in url:
  33. res.append('https://nisee.berkeley.edu' + url)
  34. # 返回结果
  35. return res
  36. ######################################
  37. # Step2,定义函数getthedata,用来匹配每 #
  38. # 一个矩形截面柱信息页上我们需要信息 #
  39. #####################################
  40. def getthedata(url):
  41. # 读取网页源代码
  42. r = requests.get(url,verify = False)
  43. # 检验读取是否成功
  44. try:
  45. r.raise_for_status()
  46. except Exception as exc:
  47. print('There was a problem: %s' % (exc))
  48. # 获取网页源代码,以纯文本形式保存在data中
  49. data = r.text
  50. # 用正则表达式分别匹配我们需要的矩形截面柱子的构件信息
  51. NameReExpr = re.compile("(Name:).*?\">(.*?)<\/td>")
  52. TypeReExpr = re.compile("(Type:).*?\">(.*?)<\/td>")
  53. f_cReExpr = re.compile("(Concrete Strength:).*?\">(.*?)\(")
  54. Tr_steelgradReExpr = re.compile("Transverse Steel:.*?\">(Grade):(.*?)<br>")
  55. Tr_yStreReExpr = re.compile("Transverse Steel:.*?>(Yield Stress:)(.*?)\(MPa\)")
  56. Tr_uStreReExpr = re.compile("Transverse Steel:.*?(Strength:)(.*?)\(MPa\)")
  57. Lg_steelgradReExpr = re.compile("Longitudinal Steel.*?\">(Grade):(.*?)<br>")
  58. Lg_yStreReExpr = re.compile("Longitudinal Steel.*?(Yield Stress:)(.*?)\(MPa\)")
  59. Lg_uStreReExpr = re.compile("Longitudinal Steel.*?(Strength:)(.*?)\(MPa\)")
  60. Geo_WidthReExpr = re.compile("(Width:)(.*?)\(mm\)")
  61. Geo_DepthReExpr = re.compile("(Depth:)(.*?)\(mm\)")
  62. Geo_LmeasureExpre = re.compile("(L-Measured:)(.*?)\(mm\)")
  63. Axi_loadReExpre = re.compile("(Axial Load:).*>(.*?)\(kN\)")
  64. Lg_DiaReExpre = re.compile("(Diameter:)<.*?\">(.*?)\(mm\)")
  65. Lg_NumbExpre = re.compile("(Number of Bars:)<.*?\">(.*?)<\/td>")
  66. Lg_ClearoverExpre = re.compile("(Clear cover:)(.*?)\(mm\)")
  67. Tr_ShearLagsExpre = re.compile("(Number of Shear Legs:).*?\">(\d)")
  68. Tr_DiaReExpre = re.compile("(Bar Diameter:)(.*?)\(mm\)")
  69. Tr_BarSpaceExpre = re.compile("(Hoop Spacing:)(.*?)\(mm\)")
  70. # 进行匹配,相应的结果以tuple的形式返回
  71. name = NameReExpr.search(data)
  72. typeof = TypeReExpr.search(data)
  73. f_c = f_cReExpr.search(data)
  74. Tr_steelgrad = Tr_steelgradReExpr.search(data)
  75. Tr_yStre = Tr_yStreReExpr.search(data)
  76. Tr_uStre = Tr_uStreReExpr.search(data)
  77. Lg_steelgrad = Lg_steelgradReExpr.search(data)
  78. Lg_yStre = Lg_yStreReExpr.search(data)
  79. Lg_uStre = Lg_uStreReExpr.search(data)
  80. Width = Geo_WidthReExpr.search(data)
  81. Depth = Geo_DepthReExpr.search(data)
  82. Length = Geo_LmeasureExpre.search(data)
  83. Axi_load = Axi_loadReExpre.search(data)
  84. Lg_Dia = Lg_DiaReExpre.search(data)
  85. Lg_Numb = Lg_NumbExpre.search(data)
  86. Lg_Clear =Lg_ClearoverExpre.search(data)
  87. Tr_ShearLags = Tr_ShearLagsExpre.search(data)
  88. Tr_Dia = Tr_DiaReExpre.search(data)
  89. Tr_BarSpace = Tr_BarSpaceExpre.search(data)
  90. # 返回结果
  91. return (name,typeof,f_c,Tr_steelgrad,Tr_yStre,Tr_uStre,Lg_steelgrad,
  92. Lg_yStre,Lg_uStre,Width,Depth,Length,Axi_load,Lg_Dia,Lg_Numb,Lg_Clear,
  93. Tr_ShearLags,Tr_Dia,Tr_BarSpace)
  94. #####################################
  95. #Step3、运用之前两个函数进行数据的抓取工作#
  96. #####################################
  97. # 定义字典,mydata,{feature1:[item1,item2,...],feature2:[item1,item2,...],...},
  98. # 用来存储二维数据表,它可以直接转化为DataFrame
  99. mydata = {}
  100. # 用来计数的变量,由于前254条数据是矩形截面的,这里的代码写的不是很规范,矩形和圆形
  101. # 截面的两种情况,其实可以用条件控制语句实现。
  102. count = 1
  103. # 调用获取超级链接的函数,获取超链接页面中我们需要的全部超级链接,结果保存为list
  104. sourceurl = 'https://nisee.berkeley.edu/spd/servlet/search'
  105. myurllist = getlinkfile(sourceurl)
  106. # 这个list存放读取过的矩形截面链接
  107. myrectlist = []
  108. # 这些是mydata字典里面的keys
  109. mykeys = ('Test_Name','Type_of_Column','Concrete_Strength',
  110. 'Tr_steelgrad','Tr_ySre','Tr_uStre','Lg_steelgrad',
  111. 'Lg_yStre','Lg_uStre','Width','Depth','Length','Axi_load',
  112. 'Lg_Dia','Lg_Num','Lg_Clearover','Tr_ShearLags','Tr_Dia',
  113. 'Tr_BarSpace')
  114. # 初始化mydata字典
  115. for mykey in mykeys:
  116. mydata[mykey] = []
  117. # 用for循环读取所有的矩形截面构件信息
  118. for url in myurllist:
  119. print('-----------------------------------------------')
  120. print('Try getting ' + str(count) + ' of all the data')
  121. # 调用getthedata函数获取信息
  122. tableattrib = getthedata(url)
  123. print('Successfully logging in the site!')
  124. # 这里的if语句作用不大
  125. if tableattrib[1].group(2) == 'Rectangular':
  126. # 记录矩形截面柱的链接
  127. myrectlist.append(url)
  128. # 读取所有信息
  129. for (mykey,el) in zip(mykeys,tableattrib):
  130. mydata[mykey].append(el.group(2))
  131. count += 1
  132. # 读到254个的时候就进入圆形截面柱了,跳出循环(用条件控制可以解决此问题)
  133. if count == 254:
  134. break
  135. # 把结果存储为DataFrame格式
  136. mydataFrame = DataFrame(mydata)
  137. # 写入csv文件
  138. mydataFrame.to_csv("peerdatabase_rect.csv")
  139. # 用集合差运算获得圆形截面柱的超级链接
  140. spirurllist = list(set(myurllist)-set(myrectlist))
  141. # 把超级链接分为两部分存储,1、矩形截面柱。2、圆形截面柱
  142. rectfile = open("linkfile_rect.txt",'w')
  143. spirfile = open("linkfile_spir.txt",'w')
  144. # 存储矩形截面柱链接
  145. for url1 in myrectlist:
  146. rectfile.write('https://nisee.berkeley.edu' + url1 + '\n')
  147. # 存储圆形截面柱链接
  148. for url2 in spirurllist:
  149. spirfile.write('https://nisee.berkeley.edu' + url2 + '\n')
  150. rectfile.close()
  151. spirfile.close()

1.2 抓取圆形截面柱子数据

文件名为:scra_spir.py

import re
import ssl
import requests
#import pandas as pd
from pandas import DataFrame
# Disable HTTPS certificate verification (the PEER site fails verification)
# and silence the resulting urllib3 warnings.
ssl._create_default_https_context = ssl._create_unverified_context
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
  9. ###################
  10. #Step1, 定义相关函数#
  11. ###################
  12. # 定义函数getthedataofspir,用来抓取圆形截面柱的信息
  13. def getthedataofspir(url):
  14. # 流程与抓取矩形截面柱的过程类似
  15. r = requests.get(url,verify = False)
  16. # 检验是否读取成功
  17. try:
  18. r.raise_for_status()
  19. except Exception as exc:
  20. print('There was a problem: %s' % (exc))
  21. data = r.text
  22. # 用正则表达式匹配信息
  23. NameReExpr = re.compile("(Name:).*?\">(.*?)<\/td>")
  24. TypeReExpr = re.compile("(Type:).*?\">(.*?)<\/td>")
  25. f_cReExpr = re.compile("(Concrete Strength:).*?\">(.*?)\(")
  26. Tr_yStreReExpr = re.compile("Transverse Steel:.*?>(Yield Stress:)(.*?)\(MPa\)")
  27. Lg_yStreReExpr = re.compile("Longitudinal Steel.*?.*?\">(Yield Stress:)(.*?)\(")
  28. Lg_uStreReExpr = re.compile("Longitudinal Steel.*?(Strength:)(.*?)\(MPa\)")
  29. Geo_DiameterExpr = re.compile("(Diameter:).*?\">(.*?)\(mm\)")
  30. Geo_LmeasureExpre = re.compile("(L-Measured:)(.*?)\(mm\)")
  31. Axi_loadReExpre = re.compile("(Axial Load:).*>(.*?)\(kN\)")
  32. Lg_DiaReExpre = re.compile("(Diameter:)<.*?\">(.*?)\(mm\)")
  33. Lg_NumbExpre = re.compile("(Number of Bars:)<.*?\">(.*?)<\/td>")
  34. Lg_ClearoverExpre = re.compile("(Cover to Center of Hoop Bar).*?\">(.*?)\(mm\)")
  35. Tr_DiaReExpre = re.compile("(Diameter Spiral).*?\">(.*?)\(mm\)")
  36. Tr_BarSpaceExpre = re.compile("(Hoop Spacing, Sv).*?\">(.*?)\(mm\)")
  37. # 搜索需要的信息
  38. name = NameReExpr.search(data)
  39. typeof = TypeReExpr.search(data)
  40. f_c = f_cReExpr.search(data)
  41. Tr_yStre = Tr_yStreReExpr.search(data)
  42. Lg_yStre = Lg_yStreReExpr.search(data)
  43. Lg_uStre = Lg_uStreReExpr.search(data)
  44. Colum_Dia = Geo_DiameterExpr.search(data)
  45. Length = Geo_LmeasureExpre.search(data)
  46. Axi_load = Axi_loadReExpre.search(data)
  47. Lg_Dia = Lg_DiaReExpre.search(data)
  48. Lg_Numb = Lg_NumbExpre.search(data)
  49. Lg_Clear =Lg_ClearoverExpre.search(data)
  50. Tr_Dia = Tr_DiaReExpre.search(data)
  51. Tr_BarSpace = Tr_BarSpaceExpre.search(data)
  52. # 返回信息
  53. return (name,typeof,f_c,Tr_yStre,
  54. Lg_yStre,Lg_uStre,Colum_Dia,Length,Axi_load,Lg_Dia,Lg_Numb,Lg_Clear,
  55. Tr_Dia, Tr_BarSpace)
  56. #################
  57. #Step2, 数据抓取 #
  58. #################
  59. # 定义字典mydata,{feature1:[item1,item2,...],feature2:[item1,item2,...],...},
  60. # 用来存放二维数据表,可以直接转化为DataFrame
  61. mydata = {}
  62. count = 1
  63. # 定义mydata里的keys
  64. mykeys = ('Test_Name','Type_of_Column','Concrete_Strength'
  65. ,'Tr_ySre',
  66. 'Lg_yStre','Lg_uStre','Colum-Dia','Length','Axi_load',
  67. 'Lg_Dia','Lg_Num','Lg_Clearover','Tr_Dia',
  68. 'Tr_BarSpace')
  69. # 初始化mydata字典
  70. for mykey in mykeys:
  71. mydata[mykey] = []
  72. # 读取存放圆形截面柱超级链接的文件
  73. mysprifile = open("linkfile_spir.txt",'r')
  74. while True:
  75. try:
  76. print('-----------------------------------------------')
  77. print('Try getting ' + str(count) + ' of all the data')
  78. url = mysprifile.readline().strip()
  79. tableattrib = getthedataofspir(url)
  80. print('Successfully logging in the site!')
  81. for (mykey,el) in zip(mykeys,tableattrib):
  82. mydata[mykey].append(el.group(2))
  83. count += 1
  84. except:
  85. break
  86. mysprifile.close()
  87. # 转化为DataFrame
  88. mydataFrame = DataFrame(mydata)
  89. # 把数据存储到本地硬盘
  90. mydataFrame.to_csv("peerdatabase_spir_new.csv")

1.3 后期处理

  • Pan/Li的数据,用在线OCR软件读取即可
  • 对于PEER数据的文件,手工在csv文件中加入一列,作为承载力
  • 最后把三个文件改名为"01Result_Rect.csv","02Result_Spir.csv",和"03dataofLiBing.csv"

二、 数据清洗

主要目的是把三个文件合并为一个数据表anaOfData
文件名为:datadigging.py

  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. Created on Fri Apr 14 16:03:11 2017
  5. @author: Bruce
  6. """
  7. import pandas as pd
  8. import Concrete_Strength_ana
  9. import numpy as np
  10. import pylab
  11. # 定义函数cleanofNumber,把带逗号','的字符串数字,转化为浮点数
  12. def cleanofNumber(Acolumn):
  13. temp = []
  14. for el in Acolumn:
  15. temp.append(np.float(el.replace(',','')))
  16. return temp
# Reading data from raw csv files.
# The original data consists of three DataFrames with different structures.
data_rect = pd.read_csv("01Result_Rect.csv",index_col='ind')
data_spir = pd.read_csv("02Result_Spir.csv",index_col= 'ind')
data_libing = pd.read_csv("03dataofLiBing.csv",index_col= 'ind')
##########################################
# Step 1: data from the Pan/Li papers    #
##########################################
# Drop the columns we do not need.
ana_libing = data_libing.drop(['Reference','Configuration','ms'],axis=1)
# Reinforcement ratios are given in percent; convert to dimensionless ratios.
ana_libing['Ratio_Lg'] = ana_libing['Ratio_Lg'] / 100
ana_libing['Ratio_Tr'] = ana_libing['Ratio_Tr'] / 100
##########################################
# Step 2: rectangular-section data       #
##########################################
# Longitudinal / transverse reinforcement ratios.
data_rect['Ratio_Lg'] = (0.25 * np.pi * data_rect['Lg_Dia']**2) * data_rect['Lg_Num'] / (
    data_rect['Depth'] * data_rect['Width'])
data_rect['Ratio_Tr'] = (0.25 * np.pi * data_rect['Tr_Dia']**2) * data_rect['Tr_ShearLags'] / (
    data_rect['Tr_BarSpace'] * data_rect['Width'])
# Drop the columns we do not need.
ana_rect = data_rect.drop(['Lg_Clearover','Lg_Dia','Lg_Num','Lg_steelgrad','Lg_uStre',
                           'Tr_BarSpace','Tr_Dia','Tr_ShearLags',
                           'Tr_steelgrad','Tr_uStre','Type_of_Column'],axis = 1)
##########################################
# Step 3: convert string-valued columns  #
# (comma-grouped numbers) to floats      #
##########################################
ana_rect['Axi_load'] = cleanofNumber(ana_rect['Axi_load'])
ana_rect['Length'] = cleanofNumber(ana_rect['Length'])
ana_rect['Tr_ySre']=cleanofNumber(ana_rect['Tr_ySre'])
data_spir['Axi_load'] = cleanofNumber(data_spir['Axi_load'])
data_spir['Length'] = cleanofNumber(data_spir['Length'])
data_spir['Colum-Dia'] = cleanofNumber(data_spir['Colum-Dia'])
data_spir['Tr_ySre']=cleanofNumber(data_spir['Tr_ySre'])
##########################################
# Step 4: circular-section data          #
# (was mislabelled a second "Step 3")    #
##########################################
# Reinforcement ratios.
data_spir['Ratio_Lg'] = (data_spir['Lg_Dia']**2) * data_spir['Lg_Num'] / (
    data_spir['Colum-Dia']**2)
data_spir['Ratio_Tr'] = (data_spir['Tr_Dia']**2) * np.pi * 0.5/ (
    (data_spir['Colum-Dia'] - data_spir['Lg_Clearover'] * 2) * data_spir['Tr_BarSpace'])
# Axial-load ratio (load in kN, strength in MPa, diameter in mm).
data_spir['Ratio_AxisLoad'] = data_spir['Axi_load'] * 10**3 / (
    data_spir['Concrete_Strength'] * data_spir['Colum-Dia']**2 * np.pi * 0.25 )
# Aspect ratio.
data_spir['Ratio_Aspect'] = data_spir['Length'] / data_spir['Colum-Dia']
# Cross-sectional area.
data_spir['Area'] = data_spir['Colum-Dia']**2 * np.pi *0.25
# Drop the columns we do not need.
ana_spir = data_spir.drop(['Lg_Clearover','Lg_Dia','Lg_Num','Lg_uStre',
                           'Tr_BarSpace','Tr_Dia'
                           ,'Type_of_Column'],axis = 1)
##########################################
# Step 5: merge the three tables         #
##########################################
# Merge the two rectangular-section tables first.
anaOfData = pd.concat([ana_rect,ana_libing]) # just include rectangular section type
# Axial-load ratio.
anaOfData['Ratio_AxisLoad'] = anaOfData['Axi_load'] * 10**3 / (
    anaOfData['Concrete_Strength'] * anaOfData['Depth'] * anaOfData['Width'])
# Aspect ratio.
anaOfData['Ratio_Aspect'] = anaOfData['Length'] / anaOfData['Depth']
# Cross-sectional area.
anaOfData['Area'] = anaOfData['Depth'] * anaOfData['Width']
# Append the circular-section table.
anaOfData = pd.concat([anaOfData,ana_spir])
# Keep only the short columns (aspect ratio <= 4).
anaOfData = anaOfData[anaOfData['Ratio_Aspect'] <= 4 ] # give me all the short columns
##########################################
# Step 6: derived quantities             #
##########################################
# Mean equivalent shear stress.
anaOfData['v_max'] = anaOfData['V_max'] * 10**3 / anaOfData['Area']
# Normalised shear capacity.
anaOfData['v_max/f_c'] = anaOfData['v_max'] / anaOfData['Concrete_Strength']
# Drop invalid rows.
anaOfData = anaOfData[anaOfData['v_max/f_c'] != 0 ]
# Re-scale the reinforcement "ratios" by the steel yield strength.
anaOfData['Ratio_Tr'] = anaOfData['Ratio_Tr'] * anaOfData['Tr_ySre']/anaOfData['Concrete_Strength']
anaOfData['Ratio_Lg'] = anaOfData['Ratio_Lg'] * anaOfData['Lg_yStre']/anaOfData['Concrete_Strength']
# Drop the columns we do not need.
anaOfData = anaOfData.drop(['Colum-Dia','Depth','Width'],axis=1)
# Drop rows containing NA values.
anaOfData = anaOfData.dropna()
# Keep only columns under axial compression; this is the final table.
anaOfData = anaOfData[anaOfData['Ratio_AxisLoad']>=0]
  107. #################################
  108. # Step6,debug代码。总览一下数据分布#
  109. #################################
  110. if __name__ == "__main__":
  111. # 不加分组,直接绘制散点图
  112. pylab.figure()
  113. pylab.plot(anaOfData['Concrete_Strength'],anaOfData['v_max'],'m.',label='experiment data')
  114. pylab.title('Effect of Concrete Strength')
  115. pylab.xlabel("$f_c^{\'}$/Mpa")
  116. pylab.ylabel('$v_{max}$/Mpa')
  117. pylab.legend()
  118. pylab.show()
  119. z1,p1,x1 = Concrete_Strength_ana.linearfit(1/anaOfData['Ratio_Aspect'],anaOfData['v_max/f_c'],0.25,1)
  120. r2 = Concrete_Strength_ana.getR2(1/anaOfData['Ratio_Aspect'],anaOfData['v_max/f_c'],p1)
  121. r2=round(r2,2)
  122. pylab.figure()
  123. pylab.plot(1/anaOfData['Ratio_Aspect'],anaOfData['v_max/f_c'],'m.',label='experiment data')
  124. pylab.plot(x1,p1(x1),'k-',label='linear fit with $R^2$= ' +str(r2))
  125. pylab.title('Effect of Aspect Ratio')
  126. pylab.xlabel(r"$1/\phi_{asp}$")
  127. pylab.ylabel('$v$')
  128. pylab.legend()
  129. pylab.show()
  130. anaOfaxis = anaOfData[anaOfData['Ratio_AxisLoad']>=0]
  131. pylab.figure()
  132. pylab.plot(anaOfaxis['Ratio_AxisLoad'],anaOfaxis['v_max/f_c'],'b.',label='experiment data')
  133. pylab.title('Effect of ratio of axis load')
  134. pylab.xlabel('$\psi_{axis}$')
  135. pylab.ylabel('$v$')
  136. pylab.legend()
  137. pylab.show()
  138. z2,p2,x2 = Concrete_Strength_ana.linearfit(anaOfData['Ratio_Lg'],anaOfData['v_max/f_c'],0,0.8)
  139. r2 = Concrete_Strength_ana.getR2(anaOfData['Ratio_Lg'],anaOfData['v_max/f_c'],p2)
  140. r2 = round(r2,2)
  141. pylab.figure()
  142. pylab.plot(anaOfData['Ratio_Lg'],anaOfData['v_max/f_c'],'b.',label = 'experiment data')
  143. pylab.plot(x2,p2(x2),'k-',label='linear fit with $R^2$= ' + str(r2))
  144. pylab.title(r'Effect of $\rho_l$')
  145. pylab.xlabel(r'$\rho_l$')
  146. pylab.ylabel(r'$v$')
  147. pylab.legend()
  148. pylab.show()
  149. baddata = anaOfData[(anaOfData['Ratio_Lg']>0.7) & (anaOfData['v_max/f_c']<0.075)]
  150. z3,p3,x3 = Concrete_Strength_ana.linearfit(anaOfData['Ratio_Tr'],anaOfData['v_max/f_c'],0,0.20)
  151. r2 = Concrete_Strength_ana.getR2(anaOfData['Ratio_Lg'],anaOfData['v_max/f_c'],p3)
  152. r2 = round(r2,2)
  153. pylab.figure()
  154. pylab.plot(anaOfData['Ratio_Tr'],anaOfData['v_max/f_c'],'b.',label = 'experiment data')
  155. # pylab.plot(x3,p3(x3),'k-',label='linear fit with $R^2$= ' + str(r2))
  156. pylab.title(r'Effect of $\rho_v$')
  157. pylab.xlabel(r'$\rho_v$')
  158. pylab.ylabel(r'$v$')
  159. pylab.legend()
  160. pylab.show()
  161. baddata = anaOfData[anaOfData['v_max/f_c']>0.175]

三、 数据处理

3.1 混凝土强度的影响

文件名为:Concrete_Strength_ana.py

  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. Created on Sat Apr 22 20:07:44 2017
  5. Mission: Find the influence of concrete strength
  6. @author: Bruce
  7. """
  8. import datadigging
  9. import pylab
  10. import pandas as pd
  11. import numpy as np
  12. ########################
  13. #Step1, 定义需要用到的函数#
  14. ########################
  15. # 根据数据表和列名,绘制数据分布柱状图
  16. def drawHist(data,column):
  17. """
  18. To draw a histogram of a column in data.
  19. data is a DataFrame, and column is an attribute in it.
  20. """
  21. pylab.figure()
  22. pylab.hist(data[column])
  23. pylab.title('This is the distribution of ' + column)
  24. pylab.xlabel(column)
  25. pylab.ylabel("number")
  26. pylab.show()
  27. # 根据数据表和两个列名绘制二维分布图
  28. def drawHist2D(data,column1,column2):
  29. """
  30. To draw a 2D histogram of two columns in data.
  31. data is a DataFrame, and column1, column2 are two attributes in it.
  32. """
  33. pylab.figure()
  34. pylab.plot(data[column1],data[column2],'b.')
  35. pylab.title('This is the distribution of ' + column1 + ' and ' + column2)
  36. pylab.xlabel(column1)
  37. pylab.ylabel(column2)
  38. pylab.legend()
  39. pylab.show()
  40. # 计算模型的回归判定系数
  41. def getR2(x,y,ypre):
  42. """
  43. x is an array-like list
  44. y is an array-like list
  45. ypre is the prediction model
  46. --------
  47. Return
  48. R^2 of the prediciton model
  49. --------
  50. """
  51. x=np.array(x)
  52. y=np.array(y)
  53. preValue = [ypre(el) for el in x]
  54. preValue = np.array(preValue)
  55. res = 1 - np.sum((y-preValue)**2) / np.sum((y-np.average(y))**2)
  56. return res
  57. # 对数据做线性回归
  58. def linearfit(x,y,minimum,maximum):
  59. """
  60. Return polynomial, a fucntion, and xrange
  61. """
  62. z1 = np.polyfit(x,y,1)
  63. p1 = np.poly1d(z1)
  64. xp1 = np.linspace(minimum,maximum,100)
  65. return z1,p1,xp1
if __name__ == "__main__":
    ############################
    # Plotting / exploration   #
    ############################
    # NOTE(review): a shared plotting helper would remove most of the
    # repetition below.
    # Group the data by aspect ratio first.
    anaOfAxisLoad_3 = datadigging.anaOfData[datadigging.anaOfData['Ratio_Aspect'] == 3]
    anaOfAxisLoad_2 = datadigging.anaOfData[datadigging.anaOfData['Ratio_Aspect'] == 2]
    anaOfAxisLoad_4 = datadigging.anaOfData[datadigging.anaOfData['Ratio_Aspect'] == 4]
    anaOfAxisLoad_1 = datadigging.anaOfData[datadigging.anaOfData['Ratio_Aspect']<1.2]
    anaOfAxisLoad_1_5 = datadigging.anaOfData[(datadigging.anaOfData['Ratio_Aspect']<1.7) &
                                              (datadigging.anaOfData['Ratio_Aspect']>1.2)]
    anaOfAxisLoad_2_5 = datadigging.anaOfData[datadigging.anaOfData['Ratio_Aspect']==2.5]
    # Relationship plots for each group.
    print('Concrete Strength(aspectratio=3 and axisload = 0.08-0.13)')
    #anaOfAxis_more3 = anaOfAxisLoad_3[
    #    (anaOfAxisLoad_3['Ratio_AxisLoad']>0.08) * (anaOfAxisLoad_3['Ratio_AxisLoad']<0.13)]
    pylab.figure()
    pylab.plot(anaOfAxisLoad_3['Concrete_Strength'],anaOfAxisLoad_3['v_max'],'k^')
    pylab.title('Concrete Strength with aspectratio 3')
    pylab.xlabel('Concrete Strength/MPa')
    pylab.ylabel('$v_{max}/Mpa$')
    pylab.show()
    print('-----------------------------------------------------------------------')
    print('Concrete Strength(aspectratio=2 and axisload = 0.08-0.13)')
    #anaOfAxis_more2 = anaOfAxisLoad_2[
    #    (anaOfAxisLoad_2['Ratio_AxisLoad']>0.08) * (anaOfAxisLoad_3['Ratio_AxisLoad']<0.13)]
    pylab.figure()
    pylab.plot(anaOfAxisLoad_2['Concrete_Strength'],anaOfAxisLoad_2['v_max'],'g^')
    pylab.title('Concrete Strength with aspectratio 2')
    pylab.xlabel('Concrete Strength/MPa')
    pylab.ylabel('$v_{max}/Mpa$')
    pylab.show()
    print('-----------------------------------------------------------------------')
    pylab.figure()
    pylab.plot(anaOfAxisLoad_1['Concrete_Strength'],anaOfAxisLoad_1['v_max'],'g^')
    pylab.title('Concrete Strength with aspectratio 1')
    pylab.xlabel('Concrete Strength/MPa')
    pylab.ylabel('$v_{max}/Mpa$')
    pylab.show()
    print('-----------------------------------------------------------------------')
    # Aspect ratio 2, low axial-load ratio (0 - 0.2): scatter plus linear fit.
    anaOfAxis_more2_1 = anaOfAxisLoad_2[
        (anaOfAxisLoad_2['Ratio_AxisLoad']>0) & (anaOfAxisLoad_2['Ratio_AxisLoad']<0.2)]
    z1,p1,xp1 = linearfit(anaOfAxis_more2_1['Concrete_Strength'],anaOfAxis_more2_1['v_max'],0,90)
    pylab.figure()
    pylab.plot(anaOfAxis_more2_1['Concrete_Strength'],anaOfAxis_more2_1['v_max'],'g^'
               ,xp1,p1(xp1),'k-')
    pylab.title('Concrete Strength with aspectratio 2 and axisload = 0-0.2')
    pylab.xlabel('Concrete Strength/MPa')
    pylab.ylabel('$v_{max}/Mpa$')
    pylab.show()
    print('The slope k= ' + str(z1[0]))
    drawHist2D(anaOfAxis_more2_1,'Ratio_Tr','Ratio_Lg')
    print('-----------------------------------------------------------------------')
    # Aspect ratio 2, medium axial-load ratio (0.2 - 0.5).
    anaOfAxis_more2_2 = anaOfAxisLoad_2[
        (anaOfAxisLoad_2['Ratio_AxisLoad']>0.2) & (anaOfAxisLoad_2['Ratio_AxisLoad']<0.5)]
    z2 = np.polyfit(anaOfAxis_more2_2['Concrete_Strength'],anaOfAxis_more2_2['v_max'],1)
    p2 = np.poly1d(z2)
    xp2 = np.linspace(0,125,125)
    pylab.figure()
    pylab.plot(anaOfAxis_more2_2['Concrete_Strength'],anaOfAxis_more2_2['v_max'],'m.',
               label = 'experiment data')
    r2 = getR2(anaOfAxis_more2_2['Concrete_Strength'],anaOfAxis_more2_2['v_max'],p2)
    r2 = round(r2,2)
    pylab.plot(xp2,p2(xp2),'k-',label = 'linear fit with $R^2$ = ' + str(r2))
    pylab.title('Effect of $f_c^{\'}$ with $\phi_{asp}$ = 2 and $\psi_{axis}$ = 0.2-0.5')
    pylab.xlabel('$f_c^{\'}$/MPa')
    pylab.ylabel('$v_{max}$/MPa')
    pylab.legend()
    pylab.show()
    print('The slope k= ' + str(z2[0]))
    print('-----------------------------------------------------------------------')
    # Same group, but with the normalised shear capacity on the y axis.
    pylab.figure()
    pylab.plot(anaOfAxis_more2_2['Concrete_Strength'],anaOfAxis_more2_2['v_max/f_c'],'m.',
               label = 'experiment data')
    pylab.title('Effect of $f_c^{\'}$ with $\phi_{asp}$ = 2 and $\psi_{axis}$ = 0.2-0.5')
    pylab.xlabel('$f_c^{\'}$/MPa')
    pylab.ylabel('$v_{max}$/MPa')
    pylab.ylim(0,0.200)
    pylab.legend()
    pylab.show()
    print('The slope k= ' + str(z2[0]))
    print('-----------------------------------------------------------------------')

3.2 高深比的影响

文件名为:Ratio_Aspect_ana.py

  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. Created on Sun Apr 23 22:53:12 2017
  5. @author: Bruce
  6. """
  7. import datadigging
  8. import Concrete_Strength_ana
  9. import pylab
  10. import pandas as pd
  11. import numpy as np
# Working copy of the cleaned data table built by datadigging.
anaofdata = datadigging.anaOfData.copy()
# Column key -> LaTeX axis label used when plotting.
dicOfKeys = {'Ratio_Aspect': r'$\phi_{asp}$','Ratio_AxisLoad':r'$\psi_{axis}$',
             'Ratio_Lg':r'$\rho_l$','Ratio_Tr':r'$\rho_v$',
             'Concrete_Strength':'$f_c^{\'}$','v_max/f_c':'$v$'}
  18. # 定义函数,绘制实验数据的分布图
  19. def drawHist(data,column,binst=None):
  20. """
  21. To draw a histogram of a column in data.
  22. data is a DataFrame, and column is an attribute in it.
  23. """
  24. pylab.figure()
  25. if binst == None:
  26. pylab.hist(data[column],color='k')
  27. else:
  28. pylab.hist(data[column],bins=binst,color='k')
  29. pylab.title('The distribution of ' + dicOfKeys[column])
  30. pylab.xlabel(dicOfKeys[column])
  31. pylab.ylabel("# of sample")
  32. pylab.show()
  33. # 定义函数,绘制二维的数据分布图,或者二维散点图
  34. def drawHist2D(data,column1,column2,isHist = True):
  35. """
  36. To draw a 2D histogram of two columns in data.
  37. data is a DataFrame, and column1, column2 are two attributes in it.
  38. isHist controls whether to draw a histogram or a scatter diagram
  39. """
  40. data = data[data[column1]!=np.nan]
  41. data = data[data[column2]!=np.nan]
  42. pylab.figure()
  43. if isHist:
  44. pylab.hist2d(data[column1],data[column2],bins = 5)
  45. pylab.colorbar()
  46. else:
  47. pylab.plot(data[column1],data[column2],'ko')
  48. pylab.title('The distribution of ' + column1 + ' and ' + column2)
  49. pylab.xlabel(column1)
  50. pylab.ylabel(column2)
  51. pylab.show()
  52. # 定义函数,用于控制变量,同时绘制图像
  53. def drawRelationship(data,column1,column2,control1,control2,val1_1,val1_2,val2_1,val2_2,
  54. style,mytitle,xlabel,ylabel,legend1,xmin,xmax,myvarified,varified = False):
  55. """
  56. data is a DataFrame
  57. To filter the data with the condtion val1_1 < control1 < val1_2 AND
  58. val2_1 < control2 < val2_2
  59. The x axis is column1 and y axis is column2
  60. style is used to set the style of the diagram
  61. ----------
  62. Return
  63. The a DataFrame after filtering, dataofDraw.
  64. ----------
  65. """
  66. dataOfDraw = data[(data[control1]>val1_1) & (data[control1]<val1_2) &
  67. (data[control2]>val2_1) & (data[control2]<val2_2)]
  68. if column1 == 'Ratio_Aspect':
  69. z1,p1,x1 = Concrete_Strength_ana.linearfit(1/dataOfDraw[column1],dataOfDraw[column2],xmin,xmax)
  70. r2 = Concrete_Strength_ana.getR2(1/dataOfDraw[column1],dataOfDraw[column2],p1)
  71. r2=round(r2,2)
  72. pylab.figure()
  73. pylab.plot(1/dataOfDraw[column1],dataOfDraw[column2],style,label = legend1)
  74. pylab.plot(x1,p1(x1),'k-',label="linear fit with $R^2$= " + str(r2))
  75. else:
  76. z1,p1,x1 = Concrete_Strength_ana.linearfit(dataOfDraw[column1],dataOfDraw[column2],xmin,xmax)
  77. r2 = Concrete_Strength_ana.getR2(dataOfDraw[column1],dataOfDraw[column2],p1)
  78. r2 = round(r2,2)
  79. pylab.figure()
  80. pylab.plot(dataOfDraw[column1],dataOfDraw[column2],style,label = legend1)
  81. pylab.plot(x1,p1(x1),'k-',label="linear fit with $R^2$= " + str(r2))
  82. if varified:
  83. pylab.plot(1/myvarified[column1],myvarified[column2],'rx',label='varified data')
  84. pylab.title(mytitle)
  85. pylab.xlabel(xlabel)
  86. pylab.ylabel(ylabel)
  87. pylab.xlim(xmin-0.005,xmax+0.005)
  88. pylab.ylim(0,0.200)
  89. pylab.legend()
  90. pylab.show()
  91. print("k = " + str(z1[0]))
  92. print("b = " + str(z1[1]))
  93. return dataOfDraw
if __name__ == "__main__":
    ######################################
    # Control the variables and plot.    #
    ######################################
    # NOTE(review): the myres1/myres2 calls below omit xmin, xmax and
    # myvarified, which the positional signature of drawRelationship
    # requires -- this raises TypeError as written; confirm the intended
    # defaults.
    anaOfAxisLoad_2 = datadigging.anaOfData[datadigging.anaOfData['Ratio_Aspect'] == 2]
    drawHist2D(anaofdata,'Ratio_AxisLoad','Ratio_Aspect',False)
    drawHist2D(anaofdata,'Ratio_Lg','Ratio_Tr')
    drawHist(anaofdata,'Ratio_Lg')
    # Subset with axial-load ratio between 0.5 and 0.9.
    anaofAspect1 = anaofdata[(anaofdata['Ratio_AxisLoad']<0.9) & (anaofdata['Ratio_AxisLoad']>0.5)]
    myres1 = drawRelationship(anaofdata,'Ratio_Aspect','v_max/f_c',
                              'Ratio_AxisLoad','Ratio_Lg',
                              0.1,0.2,0.30,0.40,'b.',
                              r'Effect of $\phi_{asp}$ with $\psi_{axis}$=0.1-0.2 and $\rho_l$=0.3-0.4',
                              r'1/$\phi_{asp}$','$v$',
                              'experiment data')
    myres2 = drawRelationship(anaofdata,'Ratio_Aspect','v_max/f_c',
                              'Ratio_AxisLoad','Ratio_Lg',
                              0.3,1.0,0.30,0.40,'b.',
                              r'Effect of $\phi_{asp}$ with $\psi_{axis}$=0.3-1.0 and $\rho_l$=0.30-0.40',
                              r'1/$\phi_{asp}$','$v$',
                              'experiment data')
    myres3 = drawRelationship(anaofAspect1,'Ratio_Aspect','v_max/f_c',
                              'Ratio_Tr','Ratio_Lg',
                              0.00,1.0,0.0,1.5,'b.',
                              r'Effect of $\phi_{asp}$ with $\psi_{axis}$=0.5-0.8 and $\rho_v$=0.10-0.20',
                              r'1/$\phi_{asp}$','$v$',
                              'experiment data',xmin=0.22,xmax=1.00)
    drawHist(myres3,'Ratio_AxisLoad')
    myvarified = drawRelationship(myres3,'Ratio_Aspect','v_max/f_c','Ratio_Tr',
                                  'Ratio_AxisLoad',
                                  0.06,0.08,0.5,1.0,'b.',
                                  r'Effect of $\phi_{asp}$ with $\rho_v$=0.06-0.08 and $\rho_l$=0.3-0.4 and $\psi_{axis}=0-0.2$',
                                  r'1/$\phi_{asp}$','$v$',
                                  'experiment data',xmin=0.25,xmax=1.00)
    myres4 = drawRelationship(myres3,'Ratio_Aspect','v_max/f_c','Ratio_Tr',
                              'Ratio_AxisLoad',
                              0.06,0.08,0.0,0.3,'b.',
                              r'Effect of $\phi_{asp}$ with $\rho_v$=0.06-0.08 and $\rho_l$=0.3-0.4 and $\psi_{axis}=0-0.2$',
                              r'1/$\phi_{asp}$','$v$',
                              'experiment data',xmin=0.25,xmax=1.00,varified=True)

3.3 轴压比、配筋率的影响

运用先前在Ratio_Aspect_ana.py文件中定义的几个函数drawHist,drawHist2D,drawRelationship,就可以进行数据分组、绘图工作。只需要简单改变输入参数即可。

3.4 机器学习算法

文件名为:machine_learning.py

  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. Created on Tue Apr 25 10:22:01 2017
  5. @author: Bruce
  6. """
  7. """
  8. 1.Some basic ideals
  9. a, Hold-out Methon. Divide my data set(D) into two seperate subsets, e.g. 200(S)+100(T)
  10. Conventionly, we use 2/3 - 4/5 to train the LEARNING MACHINE
  11. b, how to apply stratified sampling with respect to my data set? It is continous
  12. distributed.
  13. maybe simply the random.sample(population, k) method to do it(without replacement)?
  14. maybe it is to arbitrary considering the random distribution of originial data set.
  15. c, We need to repeat the random sampling process for many times, and then take
  16. the average value of the output as well as its deviation.
  17. d, consider signal-to-noise ratio(SNR)--feature extraction?
  18. """
  19. #from sklearn import linear_model
  20. from sklearn import neural_network
  21. from sklearn.preprocessing import StandardScaler
  22. from sklearn.externals import joblib
  23. import datadigging
  24. import numpy as np
  25. import pandas as pd
  26. import pylab
# Column key -> LaTeX axis label used when plotting.
dicOfKeys = {'Ratio_Aspect': r'$\phi_{asp}$','Ratio_AxisLoad':r'$\psi_{axis}$',
             'Ratio_Lg':r'$\rho_l$','Ratio_Tr':r'$\rho_v$'}
# Drop the columns the learning algorithm does not use from anaOfData,
# keeping the dimensionless features plus the label column 'v_max/f_c'.
data_machine = datadigging.anaOfData.drop(['Area','Axi_load',
                                           'Concrete_Strength','Length',
                                           'Lg_yStre','Test_Name','Tr_ySre','V_max'
                                           ,'v_max'],axis=1)
data_machine = data_machine.dropna()
# Compare the model prediction with the experiment data, one figure per
# feature column, plus a histogram of the relative differences.
def plotOne(totalRes,Model):
    """
    totalRes is the total result of the experiments
    Model is the mathematic model to be evaluated
    """
    myX,myy = getXy(totalRes,'v_max/f_c')
    for col in ["Ratio_Aspect","Ratio_AxisLoad",
                "Ratio_Tr","Ratio_Lg"]:
        # plot the actual and predicted test group
        # NOTE(review): this plots predict/actual (a ratio), yet the legend
        # says "experiment data" -- confirm which was intended.
        pylab.figure()
        # myX = scaler.inverse_transform(myX)
        pylab.plot(myX[col],Model.predict(myX)/myy,'k.',
                   label="experiment data")
        # pylab.plot(myX[col],Model.predict(myX),'b^',
        #            label="model prediction")
        pylab.title("comparison between model prediction and experiment data ")
        pylab.xlabel(dicOfKeys[col])
        pylab.ylabel("v")
        pylab.legend()
        pylab.show()
    # plot the hist for difference between actual and predicted value
    print("This is the difference distribution ")
    pylab.figure()
    pylab.hist(100 * (Model.predict(myX)-myy) / myy,15,color='k')
    pylab.title("$(v_{predicted} - v_{experiment})/v_{expermiment}$ distribution")
    pylab.xlabel("differences/%")
    pylab.ylabel("number")
    pylab.show()
  64. # 另一个绘图函数,临时定义。
  65. def plotOneMy(totalRes,Model,col,lengendlist):
  66. pylab.figure()
  67. for mydata,leg in zip(totalRes,lengendlist):
  68. pylab.plot(mydata[col],Model.predict(mydata),
  69. label=leg)
  70. pylab.title("Effect of " + col)
  71. pylab.xlabel("$\phi_{asp}$")
  72. pylab.xlim((0.8,4.2))
  73. pylab.ylim(0,0.20)
  74. pylab.ylabel("v_mac/f_c")
  75. pylab.legend()
  76. pylab.show()
  77. # 数据采样函数
  78. def sampleOfData(rawdata,num):
  79. """
  80. rawdata is the total results of the experiment, a DataFrame
  81. num is an integer, samping number.
  82. Break the rawdata into two parts of size, (num) and (total - num).
  83. --------
  84. Return a tuple
  85. --------
  86. """
  87. temp = rawdata.reset_index()
  88. temp = temp.drop('ind',axis = 1)
  89. myTrain = temp.sample(num)
  90. myTest = temp.drop(myTrain.index)
  91. return (myTrain,myTest)
  92. # 把数据表分解为X和y,一个是feature一个是label
  93. def getXy(data,label):
  94. feature = data.drop(label, axis = 1)
  95. label = data[label]
  96. return (feature,label)
  97. # 一次拟合试验
  98. def OneTrial(rawdata,numOfSample,predModel):
  99. """
  100. Run one trial
  101. rawdata is the total result, numOfSample is the sample number
  102. and preModel is the machine learning model we use, say, MLPResgressor.
  103. ------
  104. Return the R2 of the model on testdata, the resultant regression model
  105. and the corresponding test dataset.
  106. ------
  107. """
  108. mytrain,mytest = sampleOfData(rawdata,numOfSample)
  109. mytrainX,mytrainy = getXy(mytrain,'v_max/f_c')
  110. mytestX,mytesty = getXy(mytest,'v_max/f_c')
  111. model = predModel(activation='logistic',solver='lbfgs',hidden_layer_sizes=(2,))
  112. # scaler = StandardScaler()
  113. # scaler.fit(mytrainX)
  114. # mytrainX = scaler.transform(mytrainX)
  115. model.fit(mytrainX,mytrainy)
  116. # mytestX = scaler.transform(mytestX)
  117. mypre = model.predict(mytestX)
  118. R2= model.score(mytestX,mytesty)
  119. return (R2,mytestX,mytesty,mypre,model)
  120. # 一次模拟过程
  121. def oneSimulation(rawdata,numOfSample,predModel,numOfTrial,res=[],totalRes=[]):
  122. """
  123. Run on simulation
  124. rawdata is the total result, numOfSample is the sample number
  125. and preModel is the machine learning model we use, say, MLPResgressor.
  126. numOfTrial is the trial number, res and totalRes are lists to save result
  127. Result is a tuple generating in OneTrial
  128. ------
  129. Return the average R^2
  130. ------
  131. """
  132. print("Running total " + str(numOfTrial) + " trials using " +
  133. str(predModel.__name__) + " algorithm")
  134. for i in range(numOfTrial):
  135. resOfTrial = OneTrial(rawdata,numOfSample,predModel)
  136. res.append(resOfTrial[0])
  137. totalRes.append(resOfTrial)
  138. return sum(res)/len(res)
# Run the simulation and then draw the figure.
def plotFigure():
    """
    Run 800 MLPRegressor trials on data_machine, print the average and the
    best test R^2, plot the best model against the data, and
    --------
    Return the best model in all trials
    --------
    """
    # Helper used for plotting: measured vs. predicted values per feature,
    # then a histogram of the relative differences. It deliberately shadows
    # the module-level plotOne -- this variant plots the measured and the
    # predicted series as two separate point sets.
    def plotOne(totalRes,Model):
        myX,myy = getXy(totalRes,'v_max/f_c')
        for col in ["Ratio_Aspect","Ratio_AxisLoad",
                    "Ratio_Tr","Ratio_Lg"]:
            # plot the actual and predicted test group
            pylab.figure()
            pylab.plot(myX[col],myy,'k.',
                       label="experiment data")
            pylab.plot(myX[col],Model.predict(myX),'b^',
                       label="model prediction")
            pylab.title("Effect of " + col)
            pylab.xlabel(col)
            pylab.ylabel("v_mac/f_c")
            pylab.legend()
            pylab.show()
        # plot the hist for difference between actual and predicted value
        # (indentation reconstructed: histogram drawn once, after the loop)
        print("This is the difference distribution ")
        pylab.figure()
        pylab.hist(100 * (Model.predict(myX)-myy) / myy,15)
        pylab.title("predicted value - actual value distribution")
        pylab.xlabel("differences/%")
        pylab.ylabel("number")
        pylab.show()
    # Helper: return the index of the best model in the list of scores.
    # NOTE(review): despite the name, this returns the index of the LARGEST
    # element (equivalent to mylist.index(max(mylist))), i.e. the highest
    # R^2; "smallest" presumably refers to smallest deviation -- confirm.
    def smallestIndex(mylist):
        temp = mylist.copy()
        res = -100
        for el in mylist:
            if el > res:
                res = el
        return temp.index(res)
    # Start the regression simulation: 800 trials, 280 training rows each.
    res2 = []
    totalRes2 = []
    aveR2_2 = oneSimulation(data_machine,280,neural_network.MLPRegressor,
                            800,res2,totalRes2)
    # Index of the best model (highest test R^2); element [4] of a trial
    # tuple is the fitted model itself (see OneTrial's return value).
    plotindex2 = smallestIndex(res2)
    resModel = totalRes2[plotindex2][4]
    # Print summary statistics.
    print("average $R^2$ of neural_network.MLPRegressor is " +
          str(aveR2_2))
    print("smallest $R^2$ of neural_network.MLPRegressor is "+
          str(totalRes2[plotindex2][0]))
    # Plot the best model against the full data set.
    plotOne(data_machine,resModel)
    print("------------------------------------------------------------" + '\n')
    # Return the best model found across all trials.
    return resModel
## Fit many times, pick the best model, and save it locally.
#myresmodel = plotFigure()
# Load the best model previously saved to local disk.
myplotModel = joblib.load('model86_1l2u.model')
# Draw the comparison figures with the loaded model.
plotOne(data_machine,myplotModel)
if __name__ == "__main__":
    # This section is debugging / parametric-study code.
    mymodel = joblib.load('model85.model')
    # Build five synthetic data sets that sweep the aspect ratio from 1 to
    # 10 (step 0.01), each at a different axial-load ratio, holding the
    # longitudinal and transverse reinforcement ratios at their data-set
    # averages.
    def anaofRatioAspect():
        axisload = np.average(data_machine['Ratio_AxisLoad'])
        ratiolg = np.average(data_machine['Ratio_Lg'])
        ratiotr = np.average(data_machine['Ratio_Tr'])
        datalist = []
        for axisl in [axisload-0.2,axisload,axisload+0.2,axisload+0.4,axisload+0.6]:
            data_generate = []
            asp = 1
            while asp <=10:
                data_generate.append([asp,axisl,ratiolg,ratiotr])
                asp += 0.01
            data_generate=pd.DataFrame(data_generate)
            data_generate.columns=['Ratio_Aspect','Ratio_AxisLoad','Ratio_Lg','Ratio_Tr']
            datalist.append(data_generate)
        return datalist
    myplot1 = anaofRatioAspect()
    # NOTE(review): the legend assumes the average Ratio_AxisLoad is about
    # 0.2 so the swept values land on 0, 0.2, ..., 0.8 -- verify against
    # the actual data_machine average.
    plotOneMy(myplot1,mymodel,'Ratio_Aspect',['$\psi_{axis}$ = 0','$\psi_{axis}$ = 0.2',
              '$\psi_{axis}$ = 0.4','$\psi_{axis}$ = 0.6','$\psi_{axis}$ = 0.8'])
添加新批注
在作者公开此批注前,只有你和作者可见。
回复批注