Boston Housing Price Regression with an MLP
1. Dataset
The data come from the 波士顿房屋价格.csv file; some entries in the file may be missing. A sample of the rows:
CRIM,ZN,INDUS,CHAS,NOX,RM,AGE,DIS,RAD,TAX,PTRATIO,LSTAT,MEDV
0.00632,18,2.31,0,0.538,6.575,65.2,4.09,1,296,15.3,4.98,24
0.02731,0,7.07,0,0.469,6.421,78.9,4.9671,2,242,17.8,9.14,21.6
0.02729,0,7.07,0,0.469,7.185,61.1,4.9671,2,242,17.8,4.03,34.7
0.03237,0,2.18,0,0.458,6.998,45.8,6.0622,3,222,18.7,2.94,33.4
0.06905,0,2.18,0,0.458,7.147,54.2,6.0622,3,222,18.7,5.33,36.2
0.02985,0,2.18,0,0.458,6.43,58.7,6.0622,3,222,18.7,5.21,28.7
0.08829,12.5,7.87,0,0.524,6.012,66.6,5.5605,5,311,15.2,12.43,22.9
0.14455,12.5,7.87,0,0.524,6.172,96.1,5.9505,5,311,15.2,19.15,27.1
0.21124,12.5,7.87,0,0.524,5.631,100,6.0821,5,311,15.2,29.93,16.5
0.17004,12.5,7.87,0,0.524,6.004,85.9,6.5921,5,311,15.2,17.1,18.9
0.22489,12.5,7.87,0,0.524,6.377,94.3,6.3467,5,311,15.2,20.45,15
0.11747,12.5,7.87,0,0.524,6.009,82.9,6.2267,5,311,15.2,13.27,18.9
0.09378,12.5,7.87,0,0.524,5.889,39,5.4509,5,311,15.2,15.71,21.7
...
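Since the file may contain incomplete rows, it is worth checking for missing values before training. A minimal sketch with pandas, where the path is a placeholder for a local copy of the file, and dropping incomplete rows is only one possible strategy (imputing column means is another):

import pandas as pd

# Count missing values per column, then keep only complete rows
data = pd.read_csv('boston.csv')  # placeholder path; adjust to your copy
print(data.isnull().sum())
data = data.dropna()
print(f'{len(data)} complete rows remain')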
2. Code
import numpy as np


class MlpRegressor:
    def __init__(self, input_size, hidden_size1, hidden_size2, output_size, learning_rate=0.000001):
        self.input_size = input_size
        self.hidden_size1 = hidden_size1
        self.hidden_size2 = hidden_size2
        self.output_size = output_size
        self.learning_rate = learning_rate
        # Small random weights and zero biases for the three layers
        self.W1 = np.random.randn(input_size, hidden_size1) * 0.01
        self.b1 = np.zeros((1, hidden_size1))
        self.W2 = np.random.randn(hidden_size1, hidden_size2) * 0.01
        self.b2 = np.zeros((1, hidden_size2))
        self.W3 = np.random.randn(hidden_size2, output_size) * 0.01
        self.b3 = np.zeros((1, output_size))

    def relu(self, x):
        return np.maximum(x, 0)

    def relu_derivative(self, x):
        return np.where(x > 0, 1, 0)

    def forward(self, X):
        # Two ReLU hidden layers followed by a linear output (regression)
        self.Z1 = np.dot(X, self.W1) + self.b1
        self.A1 = self.relu(self.Z1)
        self.Z2 = np.dot(self.A1, self.W2) + self.b2
        self.A2 = self.relu(self.Z2)
        self.Z3 = np.dot(self.A2, self.W3) + self.b3
        self.A3 = self.Z3  # identity activation on the output layer
        return self.A3

    def backward(self, X, y):
        m = X.shape[0]
        # Gradient of the squared-error loss w.r.t. the linear output
        dZ3 = self.A3 - y
        dW3 = np.dot(self.A2.T, dZ3) / m
        db3 = np.sum(dZ3, axis=0, keepdims=True) / m
        dA2 = np.dot(dZ3, self.W3.T)
        dZ2 = dA2 * self.relu_derivative(self.Z2)
        dW2 = np.dot(self.A1.T, dZ2) / m
        db2 = np.sum(dZ2, axis=0, keepdims=True) / m
        dA1 = np.dot(dZ2, self.W2.T)
        dZ1 = dA1 * self.relu_derivative(self.Z1)
        dW1 = np.dot(X.T, dZ1) / m
        db1 = np.sum(dZ1, axis=0, keepdims=True) / m
        # Update weights and biases
        self.W3 -= self.learning_rate * dW3
        self.b3 -= self.learning_rate * db3
        self.W2 -= self.learning_rate * dW2
        self.b2 -= self.learning_rate * db2
        self.W1 -= self.learning_rate * dW1
        self.b1 -= self.learning_rate * db1

    def train(self, X, y, epochs=100000, batch_size=64):
        m = X.shape[0]
        for epoch in range(epochs):
            for i in range(0, m, batch_size):
                X_batch = X[i:i + batch_size]
                y_batch = y[i:i + batch_size]
                # Forward propagation (caches the activations backward() needs)
                self.forward(X_batch)
                # Backward propagation and parameter update
                self.backward(X_batch, y_batch)
            if (epoch + 1) % 1000 == 0:
                loss = np.mean((y - self.forward(X)) ** 2)
                print(f'Epoch {epoch+1}/{epochs}, Loss: {loss}')

    def predict(self, X):
        return self.forward(X)


if __name__ == '__main__':
    import pandas as pd
    from sklearn.model_selection import train_test_split

    # Load the data from the CSV file
    data = pd.read_csv('C:\\Users\\zhang\\Desktop\\AI 框架\\作业\\第一次作业\\boston.csv')
    # Extract the features and the target variable
    X = data.drop('MEDV', axis=1)  # features
    y = data['MEDV']  # target variable
    # Split into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    X_train = X_train.values.reshape(-1, 12)
    y_train = y_train.values.reshape(-1, 1)
    X_test = X_test.values.reshape(-1, 12)
    y_test = y_test.values.reshape(-1, 1)

    # Network hyperparameters
    input_size = X_train.shape[1]  # input layer size
    hidden_size1 = 64  # first hidden layer size
    hidden_size2 = 32  # second hidden layer size
    output_size = 1  # output layer size

    regressor_model = MlpRegressor(input_size, hidden_size1, hidden_size2, output_size)
    regressor_model.train(X_train, y_train)
    y_pred = regressor_model.predict(X_test)
    print(y_pred)
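Two likely improvements, sketched below under the assumption that the variable names (X_train, X_test, y_train) match the script above; neither is part of the original code. First, the features span very different ranges (TAX in the hundreds, NOX below 1), which is the main reason such a tiny learning rate (1e-6) is needed; standardizing them normally allows a much larger step size. Second, the mini-batches are always sliced in the same fixed order, so shuffling once per epoch can help:

import numpy as np
from sklearn.preprocessing import StandardScaler

# Standardize features: fit on the training split only, so no
# test-set statistics leak into training
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Shuffle once per epoch before slicing mini-batches (this would go at
# the top of the epoch loop inside train())
perm = np.random.permutation(X_train.shape[0])
X_train, y_train = X_train[perm], y_train[perm]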
3. Results
------------------------------- Training -------------------------------
Epoch 1000/100000, Loss: 215.62193437067899
Epoch 2000/100000, Loss: 192.23747827864736
Epoch 3000/100000, Loss: 126.98647196839126
Epoch 4000/100000, Loss: 104.56790587428584
Epoch 5000/100000, Loss: 93.55895944555557
Epoch 6000/100000, Loss: 81.83677659220027
Epoch 7000/100000, Loss: 69.98101706862538
Epoch 8000/100000, Loss: 60.4438306323673
Epoch 9000/100000, Loss: 54.275051269137215
Epoch 10000/100000, Loss: 50.144528058278446
Epoch 11000/100000, Loss: 46.71168138687022
Epoch 12000/100000, Loss: 44.091385588842066
Epoch 13000/100000, Loss: 41.98703195048573
Epoch 14000/100000, Loss: 40.32358336326167
Epoch 15000/100000, Loss: 38.920320023943574
Epoch 16000/100000, Loss: 37.66334430017403
Epoch 17000/100000, Loss: 36.57507989257117
Epoch 18000/100000, Loss: 35.6136438473766
Epoch 19000/100000, Loss: 34.7649982576941
Epoch 20000/100000, Loss: 33.94940895608493
Epoch 21000/100000, Loss: 33.16146768674789
Epoch 22000/100000, Loss: 32.395166297024666
Epoch 23000/100000, Loss: 31.600705371799602
Epoch 24000/100000, Loss: 30.804917947962632
Epoch 25000/100000, Loss: 29.974635196398847
Epoch 26000/100000, Loss: 29.20624305663592
Epoch 27000/100000, Loss: 28.35745827864321
Epoch 28000/100000, Loss: 27.602636238259457
Epoch 29000/100000, Loss: 26.81650819310092
Epoch 30000/100000, Loss: 26.002914608193542
Epoch 31000/100000, Loss: 25.325440117147213
Epoch 32000/100000, Loss: 24.65334725693612
Epoch 33000/100000, Loss: 24.060109198505522
Epoch 34000/100000, Loss: 23.54033990229176
Epoch 35000/100000, Loss: 23.10805183197649
Epoch 36000/100000, Loss: 22.760178305933966
Epoch 37000/100000, Loss: 22.43516727905778
Epoch 38000/100000, Loss: 22.107765200437264
Epoch 39000/100000, Loss: 21.965537775136905
Epoch 40000/100000, Loss: 21.989661009199523
Epoch 41000/100000, Loss: 21.62034576184785
Epoch 42000/100000, Loss: 21.572752403139138
Epoch 43000/100000, Loss: 21.34211337200876
Epoch 44000/100000, Loss: 21.0702495450661
Epoch 45000/100000, Loss: 20.837503486889897
Epoch 46000/100000, Loss: 20.681326474362805
Epoch 47000/100000, Loss: 20.503454394563672
Epoch 48000/100000, Loss: 20.389419746474474
Epoch 49000/100000, Loss: 20.074870271025098
Epoch 50000/100000, Loss: 19.98878160482701
Epoch 51000/100000, Loss: 19.762006774714624
Epoch 52000/100000, Loss: 19.73720805461732
Epoch 53000/100000, Loss: 19.840507926145058
Epoch 54000/100000, Loss: 19.586065516878563
Epoch 55000/100000, Loss: 19.26826647737148
Epoch 56000/100000, Loss: 19.186796811752668
Epoch 57000/100000, Loss: 19.128833329447612
Epoch 58000/100000, Loss: 18.86699431502371
Epoch 59000/100000, Loss: 18.991072309691766
Epoch 60000/100000, Loss: 19.037016453401602
Epoch 61000/100000, Loss: 18.865622588128197
Epoch 62000/100000, Loss: 18.872795070321768
Epoch 63000/100000, Loss: 18.872594451190064
Epoch 64000/100000, Loss: 18.854228191893057
Epoch 65000/100000, Loss: 18.67904692926805
Epoch 66000/100000, Loss: 18.768560510204782
Epoch 67000/100000, Loss: 18.7118185353233
Epoch 68000/100000, Loss: 18.55438967997513
Epoch 69000/100000, Loss: 18.621562397315216
Epoch 70000/100000, Loss: 18.405648834715997
Epoch 71000/100000, Loss: 18.189924349964524
Epoch 72000/100000, Loss: 18.353894145904075
Epoch 73000/100000, Loss: 18.45440674353988
Epoch 74000/100000, Loss: 18.39953074149147
Epoch 75000/100000, Loss: 18.364700160941528
Epoch 76000/100000, Loss: 18.186265195636466
Epoch 77000/100000, Loss: 18.302174526166176
Epoch 78000/100000, Loss: 18.205052422317795
Epoch 79000/100000, Loss: 18.037575818441386
Epoch 80000/100000, Loss: 18.01479027887508
Epoch 81000/100000, Loss: 17.96447524097066
Epoch 82000/100000, Loss: 17.895826329884876
Epoch 83000/100000, Loss: 17.7832487773441
Epoch 84000/100000, Loss: 17.86057435108409
Epoch 85000/100000, Loss: 17.703615956724253
Epoch 86000/100000, Loss: 17.68351796915479
Epoch 87000/100000, Loss: 17.633931736731242
Epoch 88000/100000, Loss: 17.612497052225557
Epoch 89000/100000, Loss: 17.647989798918914
Epoch 90000/100000, Loss: 17.710895613739616
Epoch 91000/100000, Loss: 17.598476799927635
Epoch 92000/100000, Loss: 17.56779767441564
Epoch 93000/100000, Loss: 17.668065621304482
Epoch 94000/100000, Loss: 17.48657393624495
Epoch 95000/100000, Loss: 17.609330714804045
Epoch 96000/100000, Loss: 17.576140983650948
Epoch 97000/100000, Loss: 17.568941202807263
Epoch 98000/100000, Loss: 17.52902584563828
Epoch 99000/100000, Loss: 17.383449295966283
Epoch 100000/100000, Loss: 17.320131803597924
----------------------------- Prediction -----------------------------
[[26.40028594]
[35.95670906]
[21.0375181 ]
[26.81075528]
[19.27303297]
[19.24176243]
[18.02909242]
[18.33487919]
[24.73263076]
[18.90793689]
[19.12267473]
[17.93149187]
[ 6.61505735]
[20.76470599]
[18.72246741]
[30.04006402]
[20.53259631]
[10.52485402]
[45.73780961]
[18.29906554]
[25.11074838]
[26.06149324]
[17.07979461]
[22.89127798]
[20.37305481]
[15.75135761]
[22.60178594]
[18.58907094]
[18.73504267]
[18.63945006]
[19.18184467]
[24.63804329]
[30.09810365]
[28.96464891]
[15.40000902]
[19.41514728]
[32.46295104]
[22.51692248]
[21.41680795]
[26.37525141]
[17.23718715]
[34.40460321]
[49.03923572]
[20.31266774]
[25.74679912]
[18.24712181]
[17.16189391]
[27.57055882]
[19.08726561]
[31.30209539]
[18.61876664]
[33.61725349]
[18.83749261]
[28.45453076]
[43.2806737 ]
[28.16084359]
[20.96559786]
[36.19731044]
[23.90688183]
[17.84116438]
[25.34401538]
[32.92448779]
[35.99460035]
[21.36938789]
[24.68693002]
[16.16543793]
[20.70512879]
[25.19973729]
[32.02685795]
[16.72595575]
[22.26460783]
[34.84019666]
[12.66375726]
[23.07451255]
[22.26649686]
[ 8.78072356]
[20.4205716 ]
[46.24483168]
[15.96989209]
[15.28259261]
[21.4773465 ]
[13.02810024]
[20.03181817]
[11.16139533]
[19.83595495]
[29.22583834]
[21.09276755]
[26.42585934]
[25.46828173]
[20.37952764]
[22.45176222]
[ 9.03221341]
[21.40233138]
[19.99845823]
[25.57073065]
[22.63883292]
[36.03449262]
[ 9.66554266]
[17.70571079]
[16.65375084]
[22.44931029]
[23.52876957]]
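Printing raw predictions makes quality hard to judge; comparing them with y_test using standard regression metrics is more informative. A minimal sketch with scikit-learn, assuming y_test and y_pred from the script above (this evaluation step is an addition, not part of the original run):

from sklearn.metrics import mean_squared_error, r2_score

# Summarize test-set performance with scalar metrics
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f'Test MSE: {mse:.3f}, R^2: {r2:.3f}')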