# -*- coding:utf-8 -*-

import sys
import re
from hmmlearn import hmm
import numpy as np
from sklearn.externals import joblib
import matplotlib.pyplot as plt
import tldextract
import os


def iterbrowse(path):
    """Recursively yield the full path of every regular file under *path*."""
    for home, dirs, files in os.walk(path):
        for filename in files:
            yield os.path.join(home, filename)


def extract_domain(domain):
    """Split *domain* into ``(main_domain, subdomain)``.

    First tries a hard-coded set of well-known public suffixes (fast path);
    when the suffix is not recognized it falls back to ``tldextract``,
    which is slower but far more complete.
    """
    suffix = {
        '.com', '.la', '.io', '.co', '.cn', '.info', '.net', '.org', '.me',
        '.mobi', '.us', '.biz', '.xxx', '.ca', '.co.jp', '.com.cn', '.net.cn',
        '.org.cn', '.mx', '.tv', '.ws', '.ag', '.com.ag', '.net.ag',
        '.org.ag', '.am', '.asia', '.at', '.be', '.com.br', '.net.br',
        '.name', '.live', '.news', '.bz', '.tech', '.pub', '.wang', '.space',
        '.top', '.xin', '.social', '.date', '.site', '.red', '.studio',
        '.link', '.online', '.help', '.kr', '.club', '.com.bz', '.net.bz',
        '.cc', '.band', '.market', '.com.co', '.net.co', '.nom.co',
        '.lawyer', '.de', '.es', '.com.es', '.nom.es', '.org.es', '.eu',
        '.wiki', '.design', '.software', '.fm', '.fr', '.gs', '.in',
        '.co.in', '.firm.in', '.gen.in', '.ind.in', '.net.in', '.org.in',
        '.it', '.jobs', '.jp', '.ms', '.com.mx', '.nl', '.nu', '.co.nz',
        '.net.nz', '.org.nz', '.se', '.tc', '.tk', '.tw', '.com.tw',
        '.idv.tw', '.org.tw', '.hk', '.co.uk', '.me.uk', '.org.uk', '.vg',
    }
    domain = domain.lower()
    names = domain.split(".")
    if len(names) >= 3:
        # Two-label suffix (e.g. ".co.uk") takes priority over one-label.
        if ("." + ".".join(names[-2:])) in suffix:
            return ".".join(names[-3:]), ".".join(names[:-3])
        elif ("." + names[-1]) in suffix:
            return ".".join(names[-2:]), ".".join(names[:-2])
    print("New domain suffix found. Use tld extract domain...")
    pos = domain.rfind("/")
    if pos >= 0:  # the subdomain may contain '/', e.g. output of a DNS tunnel tool
        ext = tldextract.extract(domain[pos + 1:])
        subdomain = domain[:pos + 1] + ext.subdomain
    else:
        ext = tldextract.extract(domain)
        subdomain = ext.subdomain
    if ext.suffix:
        mdomain = ext.domain + "." + ext.suffix
    else:
        mdomain = ext.domain
    return mdomain, subdomain


def parse(log):
    """Parse one '^'-separated log line into ``(main_domain, subdomain)``.

    Returns ``("", "")`` when the line is not a DNS record or is malformed
    (fewer than 55 fields).
    """
    data = log.split('^')
    SRC_PORT_IDX = 5 - 1
    DST_PORT_IDX = 6 - 1
    PROTOCOL_IDX = 7 - 1
    protol = data[PROTOCOL_IDX]
    dstport = data[DST_PORT_IDX]
    # NOTE(review): both fields are compared against '' — the literal
    # protocol/port values appear to have been lost in transcription;
    # confirm the intended constants against the log format.
    if '' == protol and ('' == dstport):
        DNS_QUERY_NAME_IDX = 55 - 1  # queried domain name field
        if len(data) < 55:
            print("error line:")
            print(log)
            return ("", "")
        domain = data[DNS_QUERY_NAME_IDX]
        mdomain, subdomain = extract_domain(domain)
        return (mdomain, subdomain)
    else:
        print("error line not a DNS:")
        print(log)
        return ("", "")
MIN_LEN=3 #状态个数
N=5
#最大似然概率阈值
T=-50 #模型文件名
FILE_MODEL="hmm-cdn.m" def get_cdn_domains(dir_path):
domain_list=[]
for path in iterbrowse(dir_path):
with open(path) as f:
for line in f:
mdomain, sub_domain = parse(line)
if len(sub_domain) >= MIN_LEN:
domain_list.append(sub_domain)
if len(domain_list) >= 2000:
return domain_list
#else:
# print path, "pass line:", line
return domain_list def domain2ver(domain):
ver=[]
for i in range(0,len(domain)):
ver.append([ord(domain[i])])
return ver def train_hmm(domain_list):
X = [[0]]
X_lens = [1]
for domain in domain_list:
ver=domain2ver(domain)
np_ver = np.array(ver)
#print len(np_ver)
try:
X=np.concatenate([X,np_ver])
except ValueError:
print domain
print len(X), len(np_ver)
print X
print np_ver
raise
X_lens.append(len(np_ver)) remodel = hmm.GaussianHMM(n_components=N, covariance_type="full", n_iter=100)
remodel.fit(X,X_lens)
joblib.dump(remodel, FILE_MODEL) return remodel def test(remodel, domain_list):
x=[]
y=[]
for domain in domain_list:
domain_ver=domain2ver(domain)
np_ver = np.array(domain_ver)
pro = remodel.score(np_ver)
print "SCORE:(%d) DOMAIN:(%s) " % (pro, domain)
x.append(len(domain))
y.append(pro)
return x,y if __name__ == '__main__':
domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_cdn")
remodel=train_hmm(domain_list)
remodel=joblib.load(FILE_MODEL) x_1,y_1=test(remodel, domain_list)
print x_1
print y_1
#sys.exit(0)
domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_black")
x_2,y_2=test(remodel, domain_list)
print x_2
print y_2
domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_white_like")
x_3,y_3=test(remodel, domain_list)
print x_3
print y_3
#%matplotlib inline
fig,ax=plt.subplots()
ax.set_xlabel('Domain Length')
ax.set_ylabel('HMM Score')
ax.scatter(x_3,y_3,color='b',label="WHITE")
ax.scatter(x_2, y_2, color='g', label="BLACK")
ax.scatter(x_1, y_1, color='r', label="CDN")
ax.legend(loc='right')
plt.show()

使用pickle保存和加载模型:

# -*- coding:utf-8 -*-

import sys
import re
from hmmlearn import hmm
import numpy as np
#from sklearn.externals import joblib
import matplotlib.pyplot as plt
import tldextract
import os
import pickle


def iterbrowse(path):
    """Yield the full path of every regular file below *path*, recursively."""
    for home, dirs, files in os.walk(path):
        for filename in files:
            yield os.path.join(home, filename)


def extract_domain(domain):
    """Return ``(main_domain, subdomain)`` for *domain*.

    A hard-coded set of common public suffixes is checked first; unknown
    suffixes fall back to ``tldextract``.
    """
    suffix = {
        '.com', '.la', '.io', '.co', '.cn', '.info', '.net', '.org', '.me',
        '.mobi', '.us', '.biz', '.xxx', '.ca', '.co.jp', '.com.cn', '.net.cn',
        '.org.cn', '.mx', '.tv', '.ws', '.ag', '.com.ag', '.net.ag',
        '.org.ag', '.am', '.asia', '.at', '.be', '.com.br', '.net.br',
        '.name', '.live', '.news', '.bz', '.tech', '.pub', '.wang', '.space',
        '.top', '.xin', '.social', '.date', '.site', '.red', '.studio',
        '.link', '.online', '.help', '.kr', '.club', '.com.bz', '.net.bz',
        '.cc', '.band', '.market', '.com.co', '.net.co', '.nom.co',
        '.lawyer', '.de', '.es', '.com.es', '.nom.es', '.org.es', '.eu',
        '.wiki', '.design', '.software', '.fm', '.fr', '.gs', '.in',
        '.co.in', '.firm.in', '.gen.in', '.ind.in', '.net.in', '.org.in',
        '.it', '.jobs', '.jp', '.ms', '.com.mx', '.nl', '.nu', '.co.nz',
        '.net.nz', '.org.nz', '.se', '.tc', '.tk', '.tw', '.com.tw',
        '.idv.tw', '.org.tw', '.hk', '.co.uk', '.me.uk', '.org.uk', '.vg',
    }
    domain = domain.lower()
    names = domain.split(".")
    if len(names) >= 3:
        # Prefer a two-label suffix match (".co.uk") over a one-label one.
        if ("." + ".".join(names[-2:])) in suffix:
            return ".".join(names[-3:]), ".".join(names[:-3])
        elif ("." + names[-1]) in suffix:
            return ".".join(names[-2:]), ".".join(names[:-2])
    print("New domain suffix found. Use tld extract domain...")
    pos = domain.rfind("/")
    if pos >= 0:  # subdomain may embed '/', as produced by DNS tunnel tools
        ext = tldextract.extract(domain[pos + 1:])
        subdomain = domain[:pos + 1] + ext.subdomain
    else:
        ext = tldextract.extract(domain)
        subdomain = ext.subdomain
    if ext.suffix:
        mdomain = ext.domain + "." + ext.suffix
    else:
        mdomain = ext.domain
    return mdomain, subdomain


def parse(log):
    """Parse a '^'-delimited log line; return ``(main_domain, subdomain)``.

    Non-DNS or malformed lines (fewer than 55 fields) yield ``("", "")``.
    """
    data = log.split('^')
    SRC_PORT_IDX = 5 - 1
    DST_PORT_IDX = 6 - 1
    PROTOCOL_IDX = 7 - 1
    protol = data[PROTOCOL_IDX]
    dstport = data[DST_PORT_IDX]
    # NOTE(review): comparing both fields to '' looks like the real
    # protocol/port constants were dropped when this was transcribed —
    # confirm against the upstream log schema.
    if '' == protol and ('' == dstport):
        DNS_QUERY_NAME_IDX = 55 - 1  # queried domain name field
        if len(data) < 55:
            print("error line:")
            print(log)
            return ("", "")
        domain = data[DNS_QUERY_NAME_IDX]
        mdomain, subdomain = extract_domain(domain)
        return (mdomain, subdomain)
    else:
        print("error line not a DNS:")
        print(log)
        return ("", "")
MIN_LEN=1 #状态个数
N=8
#最大似然概率阈值
T=-50 #模型文件名
FILE_MODEL="hmm-cdn.m"
FILE_MODEL2 ="hmm-cdn-white.pkl" def get_cdn_domains(dir_path):
domain_list=[]
for path in iterbrowse(dir_path):
with open(path) as f:
for line in f:
mdomain, sub_domain = parse(line)
if len(sub_domain) >= MIN_LEN:
domain_list.append(sub_domain)
if len(domain_list) >= 3000:
return domain_list
#else:
# print path, "pass line:", line
return domain_list def domain2ver(domain):
ver=[]
for i in range(0,len(domain)):
ver.append([ord(domain[i])])
return ver def train_hmm(domain_list):
if os.path.exists(FILE_MODEL2):
print "found model file, use it..."
file_model = open(FILE_MODEL2, 'rb')
model = pickle.load(file_model)
file_model.close()
return model X = [[0]]
X_lens = [1]
for domain in domain_list:
ver=domain2ver(domain)
np_ver = np.array(ver)
#print len(np_ver)
try:
X=np.concatenate([X,np_ver])
except ValueError:
print domain
print len(X), len(np_ver)
print X
print np_ver
raise
X_lens.append(len(np_ver)) #remodel = hmm.GaussianHMM(n_components=N, covariance_type="spherical", n_iter=500) #spherical, diag full,tied
remodel = hmm.GaussianHMM(n_components=N, covariance_type="full", n_iter=500)
remodel.fit(X,X_lens)
#joblib.dump(remodel, FILE_MODEL) file_model = open(FILE_MODEL2, 'wb')
pickle.dump(remodel, file_model)
file_model.close() return remodel def test(remodel, domain_list):
x=[]
y=[]
for domain in domain_list:
domain_ver=domain2ver(domain)
np_ver = np.array(domain_ver)
pro = remodel.score(np_ver)
print "SCORE:(%d) DOMAIN:(%s) " % (pro, domain)
x.append(len(domain))
y.append(pro)
return x,y if __name__ == '__main__':
domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_cdn")
domain_list2 = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_white_like")
#remodel=train_hmm(domain_list)
remodel=train_hmm(domain_list+domain_list2)
#remodel=joblib.load(FILE_MODEL) x_1,y_1=test(remodel, domain_list)
print x_1
print y_1
#sys.exit(0)
domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_black")
x_2,y_2=test(remodel, domain_list)
print x_2
print y_2
domain_list = get_cdn_domains("/home/bonelee/latest_metadata_sample/labeled_white_like")
x_3,y_3=test(remodel, domain_list)
print x_3
print y_3
#%matplotlib inline
fig,ax=plt.subplots()
ax.set_xlabel('Domain Length')
ax.set_ylabel('HMM Score')
#ax.scatter(x_3,y_3,color='b',label="WHITE")
ax.scatter(x_2, y_2, color='g', label="DNS tunnel")
ax.scatter(x_1, y_1, color='r', label="CDN")
ax.legend(loc='right')
plt.show()

其中初始化 X = [[0]]、X_lens = [1] 是冗余的(会向训练数据注入一个假的长度为 1 的序列),也可以按照下面的方式进行读写,去掉这段冗余的初始化。

def train_hmm(domain_list):
    """Fit (or load a cached) Gaussian HMM over the domain character sequences.

    An existing pickle at FILE_MODEL2 short-circuits training; otherwise the
    model is fitted on *domain_list*, pickled to FILE_MODEL2, and returned.
    """
    if os.path.exists(FILE_MODEL2):
        print("found model file, use it...")
        # NOTE: unpickling is only safe because this file is produced locally.
        with open(FILE_MODEL2, 'rb') as file_model:
            return pickle.load(file_model)
    # Plain lists, without the redundant [[0]] / length-1 seed of earlier
    # versions; hmmlearn accepts array-likes for fit(X, lengths).
    X = []
    X_lens = []
    for domain in domain_list:
        ver = domain2ver(domain)
        X.extend(ver)  # extend instead of X = X + ver: avoids O(n^2) copying
        X_lens.append(len(ver))
    remodel = hmm.GaussianHMM(n_components=N, covariance_type="full", n_iter=500)
    remodel.fit(X, X_lens)
    with open(FILE_MODEL2, 'wb') as file_model:
        pickle.dump(remodel, file_model)
    return remodel

hmm CDN检测的更多相关文章

  1. HMM XSS检测

    HMM XSS检测 转自:http://www.freebuf.com/articles/web/133909.html 前言 上篇我们介绍了HMM的基本原理以及常见的基于参数的异常检测实现,这次我们 ...

  2. 绕过CDN查找真实IP方法总结

    CDN的全称是Content Delivery Network,即内容分发网络.CDN是构建在现有网络基础之上的智能虚拟网络,依靠部署在各地的边缘服务器,通过中心平台的负载均衡.内容分发.调度等功能模 ...

  3. [转载]绕过CDN查找真实IP方法总结

    前言 类似备忘录形式记录一下,这里结合了几篇绕过CDN寻找真实IP的文章,总结一下绕过CDN查找真实的IP的方法 介绍 CDN的全称是Content Delivery Network,即内容分发网络. ...

  4. web渗透测试

    信息收集 网络搜索 目录遍历:site:域名 intitle:index.of 配置文件泄露:site:域名 ext:xml | ext:conf | ext:cnf | ext:reg | ext: ...

  5. 基于Python的渗透测试信息收集系统的设计和实现

    信息收集系统的设计和实现 渗透测试是保卫网络安全的一种有效且必要的技术手段,而渗透测试的本质就是信息收集,信息搜集整理可为后续的情报跟进提供强大的保证,目标资产信息搜集的广度,决定渗透过程的复杂程度, ...

  6. 脚本检测CDN节点资源是否与源站资源一致

    需求: 1.所有要检测的资源url放到一个单独文件中 2.检测cdn节点资源大小与源站文件大小是否一致 3.随机抽查几个资源,检查md5sum是否一致 4.使用多线程,可配置线程数 代码目录: hex ...

  7. 简单检测CDN链接是否有效

    CDN链接经常是使用的.但是,CDN链接挂了怎么办,因此,就要调用使用本站点的库,那么怎么实现呢? 检测CDN的jquery链接是否有效(这种方法比较简单) <script src=" ...

  8. 大数据DDos检测——DDos攻击本质上是时间序列数据,t+1时刻的数据特点和t时刻强相关,因此用HMM或者CRF来做检测是必然! 和一个句子的分词算法CRF没有区别!

    DDos攻击本质上是时间序列数据,t+1时刻的数据特点和t时刻强相关,因此用HMM或者CRF来做检测是必然!——和一个句子的分词算法CRF没有区别!注:传统DDos检测直接基于IP数据发送流量来识别, ...

  9. 基于机器学习的web异常检测——基于HMM的状态序列建模,将原始数据转化为状态机表示,然后求解概率判断异常与否

    基于机器学习的web异常检测 from: https://jaq.alibaba.com/community/art/show?articleid=746 Web防火墙是信息安全的第一道防线.随着网络 ...

随机推荐

  1. BZOJ 1116 并查集

    思路: 如果 每个联通块 边数>=点数 就OK 用并查集搞 //By SiriusRen #include <cstdio> #include <cstring> #in ...

  2. java中字符串比较==和equals

    1 总体来说java中字符串的比较是==比较引用,equals 比较值的做法.(equals 对于其他引用类型比较的是地址,这是因为object的equals方法比较的是引用),但是不同的声明方法字符 ...

  3. 每条sql语句实际上都是一个事物(事物多种类型解读)

    事务(数据库引擎) 事务是作为单个逻辑工作单元执行的一系列操作.一个逻辑工作单元必须有四个属性,称为原子性.一致性.隔离性和持久性 (ACID) 属性,只有这样才能成为一个事务.原子性事务必须是原子工 ...

  4. How to share memory between services and user processes?

    除了必要的InitializeSecurityDescriptor和SetSecurityDescriptorDacl, 内存映射文件名必须GLOBAL开头.

  5. mysql5.5和5.6版本更新内容

    mysql 5.5,5.6 比5.1改进地方: 1,5.5默认存储引擎为innodb2,5.5增加cpu多核处理能力:innodb_read_io_threads innodb_write_io_th ...

  6. Walking on the path of Redis --- Introduction and Installation

    废话开篇 以前从来没听说过有Redis这么个玩意,无意间看到一位仁兄的博客,才对其有所了解,所以决定对其深入了解下.有不对的地方还请各位指正. Redis介绍 下面是官方的介绍,不喜欢english的 ...

  7. VFS文件系统结构分析 与socket

    本文乃fireaxe原创,使用GPL发布,可以自由拷贝,转载.但转载请保持文档的完整性,并注明原作者及原链接.内容可任意使用,但对因使用该内容引起的后果不做任何保证. 作者:fireaxe_hq@ho ...

  8. PhotoZoom放大的图片效果怎么样?清不清晰?

    PhotoZoom是一款使用了革命性技术.效果最好的图像无损放大工具.它可以对图片进行放大而没有锯齿,不会失真,让您无与伦比完美放大图像质量. PhotoZoom Pro使用了S-Spline Max ...

  9. idea中git的运用

    创建本地 Git 仓库 安装 Git 插件 将代码添加到 Git 的本地仓库 在 GitHub 中创建仓库

  10. vs code格式化代码快捷键

    windows:shift+alt+F ubuntu: ctrl+shift+i