Implementing FM in TensorFlow
Most of the TensorFlow FM implementations I found online ignore the fact that real-world FM input data is highly sparse. In this implementation I use embedding_lookup_sparse to handle sparse inputs directly.
For the second-order term, embedding_lookup_sparse cannot compute the "square of the sum" and the "sum of the squares" by itself, so I wrote that part myself, following how the sum and mean combiners are implemented inside embedding_lookup_sparse. The data-input part still needs work; switching to tf.data would be cleaner (a rough sketch is included at the end of this post).
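For reference, the trick the second-order code relies on is the standard FM identity, which turns the O(n^2) pairwise interactions into a difference of two linear-time sums, computed per embedding dimension k:

\sum_{i<j} \langle v_i, v_j \rangle x_i x_j = \frac{1}{2} \sum_{k=1}^{K} \left[ \left( \sum_i v_{ik} x_i \right)^2 - \sum_i v_{ik}^2 x_i^2 \right]

The first bracketed term is the "square of the sum" and the second is the "sum of the squares" mentioned above.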
The code is as follows:
import random

import numpy as np
import tensorflow as tf
from sklearn import metrics
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops


class Args():
    feature_size = 925
    field_size = 15
    embedding_size = 20
    epoch = 3
    batch_size = 2000
    learning_rate = 0.001
    l2_reg_rate = 0.001
    checkpoint_dir = "./model"
    is_training = True


class FMmodel():
    def __init__(self):
        self.feature_sizes = Args.feature_size
        self.field_size = Args.field_size
        self.embedding_size = Args.embedding_size
        self.l2_reg_rate = Args.l2_reg_rate
        self.epoch = Args.epoch
        self.learning_rate = Args.learning_rate
        self.weight = {}
        self.model_path = Args.checkpoint_dir
        self.batch_size = Args.batch_size

    def build_model(self, is_warm_up=False):
        # Sparse inputs for one batch: feature ids and their values.
        self.x1_index = tf.sparse_placeholder(tf.int64, name="x1_index")
        self.x1_value = tf.sparse_placeholder(tf.float32, name="x1_value")
        self.labels = tf.placeholder(tf.float32, name="labels", shape=[None, 1])
        init_randomW = tf.random_normal_initializer(mean=0.0, stddev=0.05, seed=None, dtype=tf.float32)
        init_randomV = tf.random_normal_initializer(mean=0.0, stddev=0.00001, seed=None, dtype=tf.float32)
        # Feature embedding matrix V: [feature_size, embedding_size]
        self.weight["feature_weight"] = tf.get_variable(
            shape=[self.feature_sizes, self.embedding_size],
            name='feature_weight',
            initializer=init_randomV)
        # First-order coefficients w: [feature_size, 1]
        self.weight["feature_first"] = tf.get_variable(
            shape=[self.feature_sizes, 1],
            initializer=init_randomW,
            name='feature_first')
        # Global bias term
        self.weight["bais"] = tf.get_variable(
            shape=[1, 1], initializer=tf.constant_initializer(0.0), name="bais")
        # Linear part: sum_i(w_i * x_i) -> [batch_size, 1]
        self.line_part1 = tf.nn.embedding_lookup_sparse(
            self.weight["feature_first"],
            sp_ids=self.x1_index, sp_weights=self.x1_value, combiner='sum')
        self.line_part1_shape = tf.shape(self.line_part1)

        # "Square of the sum": combiner='sum' yields sum_i(v_i * x_i) per sample,
        # which is then squared element-wise. [batch_size, embedding_size]
        self.embedding_part1_sum_square = tf.square(tf.nn.embedding_lookup_sparse(
            self.weight["feature_weight"],
            sp_ids=self.x1_index, sp_weights=self.x1_value, combiner='sum'))

        # "Sum of the squares": replicate what embedding_lookup_sparse does
        # internally, so each per-feature term can be squared before summing.
        ids_1 = self.x1_index.values
        self.ids1, self.idx1 = tf.unique(ids_1)
        self.weight_1 = tf.reshape(self.x1_value.values, [-1, 1])
        if self.weight_1.dtype != dtypes.float32:
            self.weight_1 = math_ops.cast(self.weight_1, dtypes.float32)
        self.embedding_1 = tf.nn.embedding_lookup(self.weight["feature_weight"], ids=self.ids1)
        # [nnz_in_batch, embedding_size]
        self.new_embedding_1 = tf.gather(self.embedding_1, self.idx1)
        self.embedding_weight_part1 = tf.multiply(self.weight_1, self.new_embedding_1)
        self.embedding_weight_part1_square = tf.square(self.embedding_weight_part1)
        # The row index of each non-zero entry says which sample it belongs to.
        self.segment_ids_1 = self.x1_index.indices[:, 0]
        if self.segment_ids_1.dtype != dtypes.int32:
            self.segment_ids_1 = math_ops.cast(self.segment_ids_1, dtypes.int32)
        self.embeddings_square_sum1 = tf.math.segment_sum(
            self.embedding_weight_part1_square, self.segment_ids_1)
        self.ess1_shape = tf.shape(self.embeddings_square_sum1)

        # Second-order term, per the FM identity above -> [batch_size, 1]
        self.y1_v = 0.5 * tf.reduce_sum(
            tf.subtract(self.embedding_part1_sum_square, self.embeddings_square_sum1), 1)
        self.y1_v = tf.reshape(self.y1_v, [-1, 1])
        self.y1 = tf.add(tf.add(self.line_part1, self.y1_v), self.weight["bais"])
        self.o1 = tf.sigmoid(self.y1)
        self.loss = tf.losses.log_loss(labels=self.labels, predictions=self.o1)
        self.error = tf.reduce_mean(self.loss)
        self.opt = tf.train.AdamOptimizer(self.learning_rate).minimize(self.error)

        self.session = tf.Session()
        self.init = tf.group(tf.global_variables_initializer())
        if is_warm_up:
            # Warm start from an existing checkpoint instead of initializing.
            self.saver = tf.train.Saver(tf.global_variables())
            self.saver.restore(self.session, self.model_path)
        else:
            self.session.run(self.init)

    def predict(self, file_name):
        result_list = []
        for x1_index, x1_value, true_labels in self.load_data(file_name, is_train=False):
            predict1 = self.session.run([self.o1], feed_dict={
                self.x1_value: x1_value,
                self.x1_index: x1_index
            })
            for i in range(len(predict1[0])):
                result_list.append((true_labels[i][0], predict1[0][i]))
        print(len(result_list))
        with open("./data/result.txt", 'w') as file1:
            for tp in result_list:
                file1.write(str(tp[0]) + "," + str(tp[1][0]) + "\n")

    def save(self, sess, path):
        saver = tf.train.Saver()
        saver.save(sess, save_path=path)

    def restore(self, sess, path):
        saver = tf.train.Saver()
        saver.restore(sess, save_path=path)

    def train(self, train_data_file):
        index = 0
        for x1_index, x1_value, true_labels in self.load_data(train_data_file):
            # roc_auc_score needs more than one sample; load_data also skips
            # all-negative batches so the AUC below is defined.
            if len(true_labels) < 2:
                continue
            my_o1, myerror, _ = self.session.run([self.o1, self.error, self.opt], feed_dict={
                self.x1_index: x1_index,
                self.x1_value: x1_value,
                self.labels: true_labels
            })
            index += 1
            print(metrics.roc_auc_score(true_labels, my_o1))
        self.save(self.session, self.model_path)
        self.session.close()

    def load_data(self, file_name, epoch=3, is_train=True):
        def __parse_line(line):
            # Expected line format: "label uid mid feat:val feat:val ... [# comment]"
            tokens = line.split("#")[0].split()
            assert len(tokens) >= 2, "Ill-formatted line: {}".format(line)
            label = float(tokens[0])
            uid = tokens[1]
            mid = tokens[2]
            kv_pairs = [kv.split(":") for kv in tokens[3:]]
            features = {k: float(v) for (k, v) in kv_pairs}
            qid = uid
            return qid, features, label

        def __encoder_line(sample):
            qid = sample[0]
            features = sample[1]
            label = sample[2]
            features_arr = []
            for key in features.keys():
                features_arr.append(str(key) + ":" + str(features[key]))
            # __parse_line expects a third "mid" token before the feature pairs,
            # so emit a placeholder to keep the encoded lines round-trippable.
            return str(label) + " qid:" + str(qid) + " mid:0 " + " ".join(features_arr)

        def __gen_sparse_tensor(sample_list):
            # Build one batch of SparseTensorValues from sample_list: x1_index
            # carries the feature ids and x1_value the feature values, both
            # indexed by (row_in_batch, position_in_row).
            sample_index = 0
            tensor_x1_index_ids = []
            tensor_x1_index_value = []
            tensor_x1_value_ids = []
            tensor_x1_value_values = []
            label_list = []
            for sample in sample_list:
                x1_feature = sample[0]
                label_list.append([float(sample[1])])
                tmpIndex = 0
                for key in x1_feature.keys():
                    tensor_x1_index_ids.append([sample_index, tmpIndex])
                    tensor_x1_index_value.append(int(key))
                    tensor_x1_value_ids.append([sample_index, tmpIndex])
                    tensor_x1_value_values.append(float(x1_feature[key]))
                    tmpIndex += 1
                sample_index += 1
            x1_index = tf.SparseTensorValue(indices=tensor_x1_index_ids, values=tensor_x1_index_value,
                                            dense_shape=[len(sample_list), self.feature_sizes])
            x1_value = tf.SparseTensorValue(indices=tensor_x1_value_ids, values=tensor_x1_value_values,
                                            dense_shape=[len(sample_list), self.feature_sizes])
            return x1_index, x1_value, np.asarray(label_list, dtype=np.float32)

        def __gen_train_data(file_name):
            # Group consecutive samples by qid, sort each group by label
            # (descending), and write them to a new training file.
            new_file_name = file_name + "_train_data"
            with open(file_name, 'r') as filer:
                with open(new_file_name, 'w') as filew:
                    sample_list = []
                    now_qid = None
                    for l in filer:
                        qid, features, label = __parse_line(l)
                        if now_qid is None or now_qid == qid:
                            now_qid = qid
                            sample_list.append((qid, features, label))
                        else:
                            sorted_sample_list = sorted(sample_list, key=lambda x: x[2], reverse=True)
                            for sample in sorted_sample_list:
                                filew.write(__encoder_line(sample) + "\n")
                            sample_list = []
                            now_qid = qid
                            sample_list.append((qid, features, label))
                    # Flush the final qid group, which the loop never reaches.
                    for sample in sorted(sample_list, key=lambda x: x[2], reverse=True):
                        filew.write(__encoder_line(sample) + "\n")
            return new_file_name

        if is_train:
            # The training file here was pre-generated; call
            # __gen_train_data(file_name) to rebuild it from the raw data.
            new_file_name = "./data/new_final_train_data.txt"  # __gen_train_data(file_name)
            print("process data")
            sample_list = []
            while epoch > 0:
                epoch -= 1
                with open(new_file_name, 'r') as filer:
                    for l in filer:
                        qid, features, label = __parse_line(l)
                        # Buffer 10 batches worth of samples, shuffle, then
                        # yield them in batch_size chunks.
                        if len(sample_list) < self.batch_size * 10:
                            sample_list.append((features, label))
                        else:
                            random.shuffle(sample_list)
                            start = 0
                            end = len(sample_list)
                            while start < end:
                                tmpEnd = min(end, start + self.batch_size)
                                sub_list = sample_list[start:tmpEnd]
                                x1_index, x1_value, labels = __gen_sparse_tensor(sub_list)
                                # Skip all-negative batches so the AUC in train() is defined.
                                if labels.sum() < 1:
                                    start += self.batch_size
                                    continue
                                yield (x1_index, x1_value, labels)
                                start += self.batch_size
                            sample_list = []
                            sample_list.append((features, label))
        else:
            with open(file_name, 'r') as filer:
                sample_list = []
                for l in filer:
                    qid, features, label = __parse_line(l)
                    if len(sample_list) < self.batch_size:
                        sample_list.append((features, label))
                    else:
                        start = 0
                        end = len(sample_list)
                        while start < end:
                            tmpEnd = min(end, start + self.batch_size)
                            sub_list = sample_list[start:tmpEnd]
                            x1_index, x1_value, labels = __gen_sparse_tensor(sub_list)
                            yield (x1_index, x1_value, labels)
                            start += self.batch_size
                        sample_list = []
                        sample_list.append((features, label))
                # Flush the final partial batch so no eval samples are dropped.
                if sample_list:
                    x1_index, x1_value, labels = __gen_sparse_tensor(sample_list)
                    yield (x1_index, x1_value, labels)

if __name__ == "__main__":
    fm = FMmodel()
    # is_warm_up=True restores ./model, so a checkpoint must already exist;
    # run train() first (with is_warm_up=False) to create one.
    fm.build_model(is_warm_up=True)
    # fm.train("./data/new_final_train_data.txt")
    fm.predict("./data/test.data")
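To sanity-check the hand-rolled second-order computation, here is a minimal standalone sketch (TF 1.x; the toy values are made up for illustration) that compares the segment_sum path used above against the same quantity computed densely with numpy:

import numpy as np
import tensorflow as tf

# 4 features, embedding size K=3; a dense batch of 2 samples.
V = np.random.RandomState(0).randn(4, 3).astype(np.float32)
X = np.array([[1.0, 0.0, 2.0, 0.0],
              [0.0, 3.0, 0.0, 1.0]], dtype=np.float32)

# Dense reference: 0.5 * sum_k[(X V)_k^2 - (X^2)(V^2)_k]
dense_ref = 0.5 * np.sum(np.matmul(X, V) ** 2 - np.matmul(X ** 2, V ** 2), axis=1)

# Sparse path, mirroring the model code: (row, position) -> feature id / value.
indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
ids = tf.SparseTensor(indices, np.array([0, 2, 1, 3], dtype=np.int64), [2, 4])
vals = tf.SparseTensor(indices, np.array([1.0, 2.0, 3.0, 1.0], dtype=np.float32), [2, 4])

v_tbl = tf.constant(V)
# Square of the weighted sum, per sample.
sum_sq = tf.square(tf.nn.embedding_lookup_sparse(
    v_tbl, sp_ids=ids, sp_weights=vals, combiner='sum'))
# Sum of the squared weighted embeddings, grouped by sample row.
emb = tf.nn.embedding_lookup(v_tbl, ids.values)
weighted = tf.reshape(vals.values, [-1, 1]) * emb
sq_sum = tf.math.segment_sum(tf.square(weighted), ids.indices[:, 0])
second_order = 0.5 * tf.reduce_sum(sum_sq - sq_sum, axis=1)

with tf.Session() as sess:
    print(sess.run(second_order))
    print(dense_ref)

The two printed vectors should match, which confirms the sparse computation implements the identity above.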
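On the input side, as noted at the top, the hand-rolled generator in load_data could be replaced with tf.data. A rough sketch of what that might look like (TF 1.x; parse_lines and make_dataset are illustrative names I introduce here, and the line format is assumed to be the same "label uid mid feat:val ..." one that __parse_line expects):

import numpy as np
import tensorflow as tf

def parse_lines(file_name):
    # Plain-Python generator: one (ids, values, label) triple per sample.
    with open(file_name) as f:
        for line in f:
            tokens = line.split("#")[0].split()
            ids = np.array([int(kv.split(":")[0]) for kv in tokens[3:]], dtype=np.int64)
            vals = np.array([float(kv.split(":")[1]) for kv in tokens[3:]], dtype=np.float32)
            yield ids, vals, np.array([float(tokens[0])], dtype=np.float32)

def make_dataset(file_name, batch_size=2000):
    ds = tf.data.Dataset.from_generator(
        lambda: parse_lines(file_name),
        output_types=(tf.int64, tf.float32, tf.float32),
        output_shapes=([None], [None], [1]))
    # Shuffle, then pad the ragged feature lists to a common length per batch.
    return ds.shuffle(10 * batch_size).padded_batch(
        batch_size, padded_shapes=([None], [None], [1]))

# Usage:
# ids, vals, labels = make_dataset("./data/new_final_train_data.txt") \
#     .make_one_shot_iterator().get_next()

Since padded entries carry value 0.0, they contribute nothing to either FM sum, so a padded batch can be consumed with plain tf.nn.embedding_lookup instead of the sparse placeholders used above.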