Python implementation of a radial basis function (RBF) neural network
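The code below implements an RBF network in its usual form: given centres mu_j, widths sigma_j and output weights w_j, the prediction for an input x is y(x) = sum_j w_j * phi(||x - mu_j||, sigma_j), where phi is one of the radial basis functions defined at the top of the file (the multiquadric is the one actually used). The centres come from a pre-computed cluster file plus a random sample of training points, the widths are derived from the mean distance between centres, the design matrix Phi is filled by several worker processes and cached to HDF5, and the weights are obtained by solving the least-squares problem Phi * w = y.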
from numpy import array, append, vstack, transpose, reshape, \
                  dot, true_divide, mean, exp, sqrt, log, \
                  loadtxt, savetxt, zeros, frombuffer
from numpy.linalg import norm, lstsq
from multiprocessing import Process, Array
from random import sample
from time import time
from sys import stdout
from ctypes import c_double
from h5py import File

def metrics(a, b):
    # Euclidean distance between two vectors
    return norm(a - b)

# Candidate radial basis functions; multiQuadric is the one used below.
def gaussian(x, mu, sigma):
    return exp(- metrics(mu, x)**2 / (2 * sigma**2))

def multiQuadric(x, mu, sigma):
    return pow(metrics(mu, x)**2 + sigma**2, 0.5)

def invMultiQuadric(x, mu, sigma):
    return pow(metrics(mu, x)**2 + sigma**2, -0.5)

def plateSpine(x, mu):
    r = metrics(mu, x)
    return (r**2) * log(r)

class Rbf:
    def __init__(self, prefix='rbf', workers=4, extra_neurons=0, from_files=None):
        self.prefix = prefix
        self.workers = workers
        self.extra_neurons = extra_neurons

        # Import partial model
        if from_files is not None:
            w_handle = self.w_handle = File(from_files['w'], 'r')
            mu_handle = self.mu_handle = File(from_files['mu'], 'r')
            sigma_handle = self.sigma_handle = File(from_files['sigma'], 'r')

            self.w = w_handle['w']
            self.mu = mu_handle['mu']
            self.sigmas = sigma_handle['sigmas']

            self.neurons = self.sigmas.shape[0]

    def _calculate_error(self, y):
        self.error = mean(abs(self.os - y))
        self.relative_error = true_divide(self.error, mean(y))

    def _generate_mu(self, x):
        n = self.n
        extra_neurons = self.extra_neurons
        # TODO: Make reusable
        mu_clusters = loadtxt('clusters100.txt', delimiter='\t')
        mu_indices = sample(range(n), extra_neurons)
        mu_new = x[mu_indices, :]
        mu = vstack((mu_clusters, mu_new))
        return mu

    def _calculate_sigmas(self):
        neurons = self.neurons
        mu = self.mu

        sigmas = zeros((neurons, ))
        for i in range(neurons):
            dists = [0 for _ in range(neurons)]
            for j in range(neurons):
                if i != j:
                    dists[j] = metrics(mu[i], mu[j])
            # Width heuristic: twice the mean distance to the other centres
            sigmas[i] = mean(dists) * 2
            # max(dists) / sqrt(neurons * 2)
        return sigmas

    def _calculate_phi(self, x):
        C = self.workers
        neurons = self.neurons
        mu = self.mu
        sigmas = self.sigmas
        phi = self.phi = None
        n = self.n

        def heavy_lifting(c, phi):
            s = jobs[c][1] - jobs[c][0]
            for k, i in enumerate(range(jobs[c][0], jobs[c][1])):
                for j in range(neurons):
                    # phi[i, j] = metrics(x[i,:], mu[j])**3
                    # phi[i, j] = plateSpine(x[i,:], mu[j])
                    # phi[i, j] = invMultiQuadric(x[i,:], mu[j], sigmas[j])
                    phi[i, j] = multiQuadric(x[i,:], mu[j], sigmas[j])
                    # phi[i, j] = gaussian(x[i,:], mu[j], sigmas[j])
                if k % 1000 == 0:
                    percent = true_divide(k, s) * 100
                    print(c, ': {:2.2f}%'.format(percent))
            print(c, ': Done')

        # distributing the work between the workers over a shared array
        # (passing a nested function to Process relies on fork-based
        # multiprocessing, so this part is Unix-only)
        shared_array = Array(c_double, n * neurons)
        phi = frombuffer(shared_array.get_obj())
        phi = phi.reshape((n, neurons))

        jobs = []
        workers = []

        p = n // C   # integer division so the job boundaries stay ints
        m = n % C
        for c in range(C):
            jobs.append((c * p, (c + 1) * p + (m if c == C - 1 else 0)))
            worker = Process(target=heavy_lifting, args=(c, phi))
            workers.append(worker)
            worker.start()

        for worker in workers:
            worker.join()

        return phi

    def _do_algebra(self, y):
        phi = self.phi

        # Solve the least-squares problem phi * w = y for the output weights
        w = lstsq(phi, y)[0]
        os = dot(w, transpose(phi))
        return w, os

    def train(self, x, y):
        self.n = x.shape[0]

        ## Initialize HDF5 caches
        prefix = self.prefix
        postfix = str(self.n) + '-' + str(self.extra_neurons) + '.hdf5'
        name_template = prefix + '-{}-' + postfix
        phi_handle = self.phi_handle = File(name_template.format('phi'), 'w')
        os_handle = self.os_handle = File(name_template.format('os'), 'w')
        w_handle = self.w_handle = File(name_template.format('w'), 'w')
        mu_handle = self.mu_handle = File(name_template.format('mu'), 'w')
        sigma_handle = self.sigma_handle = File(name_template.format('sigma'), 'w')

        ## Mu generation
        mu = self.mu = self._generate_mu(x)
        self.neurons = mu.shape[0]
        print('({} neurons)'.format(self.neurons))
        # Save to HDF5
        mu_h5 = mu_handle.create_dataset('mu', data=mu)

        ## Sigma calculation
        print('Calculating Sigma...')
        sigmas = self.sigmas = self._calculate_sigmas()
        # Save to HDF5
        sigmas_h5 = sigma_handle.create_dataset('sigmas', data=sigmas)
        print('Done')

        ## Phi calculation
        print('Calculating Phi...')
        phi = self.phi = self._calculate_phi(x)
        print('Done')

        # Saving to HDF5
        print('Serializing...')
        phi_h5 = phi_handle.create_dataset('phi', data=phi)
        del phi
        self.phi = phi_h5
        print('Done')

        ## Algebra
        print('Doing final algebra...')
        w, os = self.w, self.os = self._do_algebra(y)

        # Saving to HDF5
        w_h5 = w_handle.create_dataset('w', data=w)
        os_h5 = os_handle.create_dataset('os', data=os)

        ## Calculate error
        self._calculate_error(y)
        print('Done')

    def predict(self, test_data):
        # Materialize the (possibly HDF5-backed) parameters as plain arrays
        # (h5py removed Dataset.value, so [...] indexing is used instead)
        mu = self.mu = self.mu[...]
        sigmas = self.sigmas = self.sigmas[...]
        w = self.w = self.w[...]

        print('Calculating phi for test data...')
        self.n = test_data.shape[0]   # _calculate_phi reads the sample count from self.n
        phi = self._calculate_phi(test_data)

        os = dot(w, transpose(phi))
        savetxt('iok3834.txt', os, delimiter='\n')
        return os

    @property
    def summary(self):
        return '\n'.join(
            ['-----------------',
             'Training set size: {}'.format(self.n),
             'Hidden layer size: {}'.format(self.neurons),
             '-----------------',
             'Absolute error : {:02.2f}'.format(self.error),
             'Relative error : {:02.2f}%'.format(self.relative_error * 100)])

# Standalone variant that rebuilds phi from a previously saved model
# (note the hard-coded file names)
def predict(test_data):
    mu = File('rbf-mu-212243-2400.hdf5', 'r')['mu'][...]
    sigmas = File('rbf-sigma-212243-2400.hdf5', 'r')['sigmas'][...]
    w = File('rbf-w-212243-2400.hdf5', 'r')['w'][...]

    n = test_data.shape[0]
    neur = mu.shape[0]

    mu = transpose(mu)
    # mu.reshape((n, neur))  (result was never assigned, so this line had no effect)

    phi = zeros((n, neur))
    for i in range(n):
        for j in range(neur):
            phi[i, j] = multiQuadric(test_data[i,:], mu[j], sigmas[j])

    os = dot(w, transpose(phi))
    savetxt('iok3834.txt', os, delimiter='\n')
    return os
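
A minimal usage sketch, with the Rbf class above in scope. The array shapes, feature count and file names below are made up for illustration, and clusters100.txt must exist in the working directory with the same number of columns as the training data:

from numpy.random import rand

# hypothetical toy data: 5000 training samples with 8 features
x_train = rand(5000, 8)
y_train = rand(5000)

model = Rbf(prefix='rbf', workers=4, extra_neurons=100)
model.train(x_train, y_train)      # also writes the rbf-*-5000-100.hdf5 caches
print(model.summary)

x_test = rand(1000, 8)
predictions = model.predict(x_test)

A previously trained model can be reloaded by pointing from_files at the saved HDF5 files, e.g. Rbf(from_files={'w': 'rbf-w-5000-100.hdf5', 'mu': 'rbf-mu-5000-100.hdf5', 'sigma': 'rbf-sigma-5000-100.hdf5'}), after which predict can be called directly.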