Learning Memory-guided Normality: Code Study Notes
Memory module core
The core of the memory module lies in the following definition of the Memory class.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

class Memory(nn.Module):
    def __init__(self, memory_size, feature_dim, key_dim, temp_update, temp_gather):
        super(Memory, self).__init__()
        # Constants
        self.memory_size = memory_size
        self.feature_dim = feature_dim
        self.key_dim = key_dim
        self.temp_update = temp_update
        self.temp_gather = temp_gather

    def hard_neg_mem(self, mem, i):
        # Hardest negative: the memory item most similar to slot i, excluding i itself
        similarity = torch.matmul(mem, torch.t(self.keys_var))
        similarity[:, i] = -1
        _, max_idx = torch.topk(similarity, 1, dim=1)
        return self.keys_var[max_idx]

    def random_pick_memory(self, mem, max_indices):
        # For each memory slot, randomly pick one query assigned to it (-1 if none)
        m, d = mem.size()
        output = []
        for i in range(m):
            flattened_indices = (max_indices == i).nonzero()
            a, _ = flattened_indices.size()
            if a != 0:
                number = np.random.choice(a, 1)
                output.append(flattened_indices[number, 0])
            else:
                output.append(-1)
        return torch.tensor(output)

    def get_update_query(self, mem, max_indices, update_indices, score, query, train):
        m, d = mem.size()
        if train:
            query_update = torch.zeros((m, d)).cuda()
            # random_update = torch.zeros((m,d)).cuda()
            for i in range(m):
                idx = torch.nonzero(max_indices.squeeze(1) == i)
                a, _ = idx.size()
                if a != 0:
                    # Queries assigned to slot i, each weighted by its score for i,
                    # normalized by the largest score slot i receives
                    query_update[i] = torch.sum(((score[idx, i] / torch.max(score[:, i])) * query[idx].squeeze(1)), dim=0)
                else:
                    query_update[i] = 0
            return query_update
        else:
            # Identical to the training branch in the released code
            query_update = torch.zeros((m, d)).cuda()
            for i in range(m):
                idx = torch.nonzero(max_indices.squeeze(1) == i)
                a, _ = idx.size()
                if a != 0:
                    query_update[i] = torch.sum(((score[idx, i] / torch.max(score[:, i])) * query[idx].squeeze(1)), dim=0)
                else:
                    query_update[i] = 0
            return query_update

    def get_score(self, mem, query):
        bs, h, w, d = query.size()
        m, d = mem.size()
        score = torch.matmul(query, torch.t(mem))  # b X h X w X m
        score = score.view(bs * h * w, m)  # (b X h X w) X m
        score_query = F.softmax(score, dim=0)   # softmax over query locations
        score_memory = F.softmax(score, dim=1)  # softmax over memory items
        return score_query, score_memory

    def forward(self, query, keys, train=True):
        batch_size, dims, h, w = query.size()  # b X d X h X w
        query = F.normalize(query, dim=1)
        query = query.permute(0, 2, 3, 1)  # b X h X w X d
        # train
        if train:
            # losses
            separateness_loss, compactness_loss = self.gather_loss(query, keys, train)
            # read
            updated_query, softmax_score_query, softmax_score_memory = self.read(query, keys)
            # update
            updated_memory = self.update(query, keys, train)
            return updated_query, updated_memory, softmax_score_query, softmax_score_memory, separateness_loss, compactness_loss
        # test
        else:
            # loss
            compactness_loss, query_re, top1_keys, keys_ind = self.gather_loss(query, keys, train)
            # read
            updated_query, softmax_score_query, softmax_score_memory = self.read(query, keys)
            # update: the memory is left unchanged at test time
            updated_memory = keys
            return updated_query, updated_memory, softmax_score_query, softmax_score_memory, query_re, top1_keys, keys_ind, compactness_loss

    def update(self, query, keys, train):
        batch_size, h, w, dims = query.size()  # b X h X w X d
        softmax_score_query, softmax_score_memory = self.get_score(keys, query)
        query_reshape = query.contiguous().view(batch_size * h * w, dims)
        _, gathering_indices = torch.topk(softmax_score_memory, 1, dim=1)
        _, updating_indices = torch.topk(softmax_score_query, 1, dim=0)
        # Both branches are identical in the released code
        if train:
            query_update = self.get_update_query(keys, gathering_indices, updating_indices, softmax_score_query, query_reshape, train)
            updated_memory = F.normalize(query_update + keys, dim=1)
        else:
            query_update = self.get_update_query(keys, gathering_indices, updating_indices, softmax_score_query, query_reshape, train)
            updated_memory = F.normalize(query_update + keys, dim=1)
        return updated_memory.detach()

    def pointwise_gather_loss(self, query_reshape, keys, gathering_indices, train):
        n, dims = query_reshape.size()  # (b X h X w) X d
        loss_mse = torch.nn.MSELoss(reduction='none')
        pointwise_loss = loss_mse(query_reshape, keys[gathering_indices].squeeze(1).detach())
        return pointwise_loss

    def gather_loss(self, query, keys, train):
        batch_size, h, w, dims = query.size()  # b X h X w X d
        if train:
            loss = torch.nn.TripletMarginLoss(margin=1.0)
            loss_mse = torch.nn.MSELoss()
            softmax_score_query, softmax_score_memory = self.get_score(keys, query)
            query_reshape = query.contiguous().view(batch_size * h * w, dims)
            _, gathering_indices = torch.topk(softmax_score_memory, 2, dim=1)
            # 1st, 2nd closest memories
            pos = keys[gathering_indices[:, 0]]
            neg = keys[gathering_indices[:, 1]]
            top1_loss = loss_mse(query_reshape, pos.detach())
            gathering_loss = loss(query_reshape, pos.detach(), neg.detach())
            return gathering_loss, top1_loss
        else:
            loss_mse = torch.nn.MSELoss()
            softmax_score_query, softmax_score_memory = self.get_score(keys, query)
            query_reshape = query.contiguous().view(batch_size * h * w, dims)
            _, gathering_indices = torch.topk(softmax_score_memory, 1, dim=1)
            gathering_loss = loss_mse(query_reshape, keys[gathering_indices].squeeze(1).detach())
            return gathering_loss, query_reshape, keys[gathering_indices].squeeze(1).detach(), gathering_indices[:, 0]

    def read(self, query, updated_memory):
        batch_size, h, w, dims = query.size()  # b X h X w X d
        softmax_score_query, softmax_score_memory = self.get_score(updated_memory, query)
        query_reshape = query.contiguous().view(batch_size * h * w, dims)
        concat_memory = torch.matmul(softmax_score_memory.detach(), updated_memory)  # (b X h X w) X d
        updated_query = torch.cat((query_reshape, concat_memory), dim=1)  # (b X h X w) X 2d
        updated_query = updated_query.view(batch_size, h, w, 2 * dims)
        updated_query = updated_query.permute(0, 3, 1, 2)
        return updated_query, softmax_score_query, softmax_score_memory
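Before stepping through the individual methods, here is a minimal usage sketch. All sizes (10 memory items, 512-dim features, a 32×32 feature map, batch of 4) and the temperature values are illustrative assumptions rather than values taken from the repository's config; a CUDA device is required because get_update_query hardcodes .cuda().

import torch
import torch.nn.functional as F

memory_size, feature_dim, key_dim = 10, 512, 512  # assumed sizes
memory = Memory(memory_size, feature_dim, key_dim, temp_update=0.1, temp_gather=0.1)

# The memory items live outside the module and are passed in as `keys`.
keys = F.normalize(torch.rand((memory_size, key_dim)), dim=1).cuda()
query = torch.rand((4, feature_dim, 32, 32)).cuda()  # encoder output, b X d X h X w

updated_query, updated_memory, score_q, score_m, sep_loss, comp_loss = memory(query, keys, train=True)
print(updated_query.shape)   # torch.Size([4, 1024, 32, 32]) -- read() doubles the channels
print(updated_memory.shape)  # torch.Size([10, 512])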
Update process
The update step calls get_update_query(self, mem, max_indices, update_indices, score, query, train) to compute \(query\_update = \sum_{k \in U_t^m} v_t^{'k,m} q_t^k\), and then computes \(f(p^m + query\_update)\). The paper describes \(f\) as the L2 norm, which matches the F.normalize call in update above.
Let's look at the definition of get_update_query:
def get_update_query(self, mem, max_indices, update_indices, score, query, train):
    m, d = mem.size()
    if train:
        query_update = torch.zeros((m, d)).cuda()
        # random_update = torch.zeros((m,d)).cuda()
        for i in range(m):
            idx = torch.nonzero(max_indices.squeeze(1) == i)
            a, _ = idx.size()
            if a != 0:
                query_update[i] = torch.sum(((score[idx, i] / torch.max(score[:, i])) * query[idx].squeeze(1)), dim=0)
            else:
                query_update[i] = 0
        return query_update
    else:
        query_update = torch.zeros((m, d)).cuda()
        for i in range(m):
            idx = torch.nonzero(max_indices.squeeze(1) == i)
            a, _ = idx.size()
            if a != 0:
                query_update[i] = torch.sum(((score[idx, i] / torch.max(score[:, i])) * query[idx].squeeze(1)), dim=0)
            else:
                query_update[i] = 0
        return query_update
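To make the loop concrete, here is a tiny self-contained sketch of the aggregation for one slot i, with made-up tensors. Note that in the real call the top-1 assignment comes from softmax_score_memory while the weights come from softmax_score_query; the sketch reuses one score tensor for both to stay short.

import torch

score = torch.softmax(torch.rand(6, 3), dim=0)  # 6 query locations, 3 memory slots
query = torch.rand(6, 4)                        # 4-dim query features
max_indices = torch.argmax(score, dim=1, keepdim=True)  # nearest slot per query

i = 0
idx = torch.nonzero(max_indices.squeeze(1) == i)
if idx.numel() > 0:
    # Same expression as in get_update_query: each assigned query is weighted by
    # its score for slot i, normalized by the largest score slot i receives.
    update_i = torch.sum((score[idx, i] / torch.max(score[:, i])) * query[idx].squeeze(1), dim=0)
    print(update_i.shape)  # torch.Size([4])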
The key step in this definition is the computation of \(v_t^{'k,m}\), implemented as score[idx,i] / torch.max(score[:,i]). This in turn relies on \(v_t^{k,m}\), which, like \(w_t^{k,m}\), is a matching weight; the paper computes both weights via the get_score function, defined as follows:
def get_score(self, mem, query):
    # Computes the matching weights w_t^{k,m} and v_t^{k,m}
    bs, h, w, d = query.size()
    m, d = mem.size()
    score = torch.matmul(query, torch.t(mem))  # b X h X w X m
    score = score.view(bs * h * w, m)  # (b X h X w) X m
    score_query = F.softmax(score, dim=0)
    score_memory = F.softmax(score, dim=1)
    return score_query, score_memory
This implements the weight computation from the paper: score_memory takes the softmax over the m memory items, giving each query location's reading weight \(w_t^{k,m}\), while score_query takes the softmax over the b×h×w query locations, giving each memory item's updating weight \(v_t^{k,m}\).
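A small sketch under assumed sizes confirms the two normalization axes: each row of score_memory (one query location) sums to 1 over the memory items, and each column of score_query (one memory item) sums to 1 over the query locations.

import torch
import torch.nn.functional as F

query = F.normalize(torch.rand(2, 4, 4, 8), dim=-1)  # b X h X w X d
mem = F.normalize(torch.rand(5, 8), dim=1)           # m X d

score = torch.matmul(query, torch.t(mem)).view(-1, 5)  # (b*h*w) X m
score_query = F.softmax(score, dim=0)
score_memory = F.softmax(score, dim=1)

print(score_memory.sum(dim=1)[:3])  # ones: weights over memory items per query
print(score_query.sum(dim=0))       # ones: weights over query locations per item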


Read process
def read(self, query, updated_memory):
    # Read step
    batch_size, h, w, dims = query.size()  # b X h X w X d
    softmax_score_query, softmax_score_memory = self.get_score(updated_memory, query)
    query_reshape = query.contiguous().view(batch_size * h * w, dims)
    # Weighted average of the memory items, using the (detached) matching weights
    concat_memory = torch.matmul(softmax_score_memory.detach(), updated_memory)  # (b X h X w) X d
    # Concatenate the retrieved feature with the original query
    updated_query = torch.cat((query_reshape, concat_memory), dim=1)  # (b X h X w) X 2d
    updated_query = updated_query.view(batch_size, h, w, 2 * dims)
    updated_query = updated_query.permute(0, 3, 1, 2)
    return updated_query, softmax_score_query, softmax_score_memory
The key steps are annotated in the code above; since the retrieved memory feature is concatenated with the query, the channel dimension doubles, so the decoder that consumes updated_query must accept twice as many channels as the encoder produces.
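As a quick shape check (assumed sizes again), the following sketch reproduces the read arithmetic and shows the channel doubling:

import torch
import torch.nn.functional as F

b, h, w, d, m = 2, 4, 4, 8, 5
query_reshape = torch.rand(b * h * w, d)                  # flattened queries
score_memory = F.softmax(torch.rand(b * h * w, m), dim=1)  # matching weights
memory_items = torch.rand(m, d)

concat_memory = torch.matmul(score_memory, memory_items)      # (b*h*w) X d
updated_query = torch.cat((query_reshape, concat_memory), 1)  # (b*h*w) X 2d
print(updated_query.view(b, h, w, 2 * d).permute(0, 3, 1, 2).shape)
# torch.Size([2, 16, 4, 4])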
Forward process
separateness_loss, compactness_loss = self.gather_loss(query, keys, train)
# read
updated_query, softmax_score_query, softmax_score_memory = self.read(query, keys)
# update
updated_memory = self.update(query, keys, train)
return updated_query, updated_memory, softmax_score_query, softmax_score_memory, separateness_loss, compactness_loss
This excerpt from the training branch of forward computes the two memory losses, then calls the read and update functions in turn.
The overall loss function is defined as \(L = L_{rec} + \lambda_c L_{compact} + \lambda_s L_{separate}\); the compactness and separateness terms are implemented in the gather_loss function, defined after the following sketch.
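A hedged sketch of how the three terms combine in a training step. The stand-in tensors, the λ values of 0.1, and the variable names are illustrative assumptions about the training script, not code from the repository.

import torch
import torch.nn.functional as F

# Stand-ins for the reconstruction, its target, and the two scalars that
# Memory.forward returns in training mode.
reconstruction = torch.rand(1, 3, 64, 64, requires_grad=True)
target = torch.rand(1, 3, 64, 64)
separateness_loss = torch.tensor(0.2, requires_grad=True)
compactness_loss = torch.tensor(0.3, requires_grad=True)

lam_c, lam_s = 0.1, 0.1  # assumed loss weights
loss = F.mse_loss(reconstruction, target) + lam_c * compactness_loss + lam_s * separateness_loss
loss.backward()
print(loss.item())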
def gather_loss(self, query, keys, train):
    batch_size, h, w, dims = query.size()  # b X h X w X d
    if train:
        # Triplet loss: the main building block of the feature separateness loss
        loss = torch.nn.TripletMarginLoss(margin=1.0)
        # Mean-squared-error loss, used for the feature compactness term
        loss_mse = torch.nn.MSELoss()
        softmax_score_query, softmax_score_memory = self.get_score(keys, query)
        query_reshape = query.contiguous().view(batch_size * h * w, dims)
        _, gathering_indices = torch.topk(softmax_score_memory, 2, dim=1)
        # 1st, 2nd closest memories
        pos = keys[gathering_indices[:, 0]]
        neg = keys[gathering_indices[:, 1]]
        top1_loss = loss_mse(query_reshape, pos.detach())
        gathering_loss = loss(query_reshape, pos.detach(), neg.detach())
        return gathering_loss, top1_loss
    else:
        loss_mse = torch.nn.MSELoss()
        softmax_score_query, softmax_score_memory = self.get_score(keys, query)
        query_reshape = query.contiguous().view(batch_size * h * w, dims)
        _, gathering_indices = torch.topk(softmax_score_memory, 1, dim=1)
        gathering_loss = loss_mse(query_reshape, keys[gathering_indices].squeeze(1).detach())
        return gathering_loss, query_reshape, keys[gathering_indices].squeeze(1).detach(), gathering_indices[:, 0]
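At test time the extra return values (query_reshape and its top-1 memory items) make a per-location distance available, in the spirit of pointwise_gather_loss above. The sketch below uses random stand-in tensors and an assumed per-location mean reduction, not code from the repository.

import torch
import torch.nn.functional as F

query_reshape = F.normalize(torch.rand(16, 8), dim=1)  # (b*h*w) X d stand-in
top1_keys = F.normalize(torch.rand(16, 8), dim=1)      # nearest memory item per location

pointwise = ((query_reshape - top1_keys) ** 2).mean(dim=1)  # one value per location
print(pointwise.shape)  # torch.Size([16])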