HashPayloadPcapReader
package net.ripe.hadoop.pcap;

import java.io.DataInputStream;
import java.io.IOException;

import com.google.common.hash.Hashing;

import net.ripe.hadoop.pcap.packet.HashPayloadPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class HashPayloadPcapReader extends PcapReader {
    public HashPayloadPcapReader(DataInputStream is) throws IOException {
        super(is);
    }

    @Override
    protected Packet createPacket() {
        return new HashPayloadPacket();
    }

    @Override
    protected boolean isReassemble() {
        return true;
    }

    @Override
    protected boolean isPush() {
        return false;
    }

    @Override
    protected void processPacketPayload(Packet packet, byte[] payload) {
        if (payload.length > 0) {
            packet.put(HashPayloadPacket.PAYLOAD_SHA1_HASH, Hashing.sha1().hashBytes(payload).toString());
            packet.put(HashPayloadPacket.PAYLOAD_SHA256_HASH, Hashing.sha256().hashBytes(payload).toString());
            packet.put(HashPayloadPacket.PAYLOAD_SHA512_HASH, Hashing.sha512().hashBytes(payload).toString());
            packet.put(HashPayloadPacket.PAYLOAD_MD5_HASH, Hashing.md5().hashBytes(payload).toString());
        }
    }
}
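A minimal driver sketch for this reader: it assumes, as in the RIPE hadoop-pcap library, that PcapReader implements Iterable&lt;Packet&gt; and that Packet behaves as a map of field names to values. The class name HashExample and the command-line file argument are illustrative, not part of the library.

import java.io.DataInputStream;
import java.io.FileInputStream;

import net.ripe.hadoop.pcap.HashPayloadPcapReader;
import net.ripe.hadoop.pcap.packet.HashPayloadPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class HashExample {
    public static void main(String[] args) throws Exception {
        // args[0] is a path to a pcap file, e.g. one produced by tcpdump
        try (DataInputStream is = new DataInputStream(new FileInputStream(args[0]))) {
            HashPayloadPcapReader reader = new HashPayloadPcapReader(is);
            for (Packet packet : reader) {
                // Only packets with a non-empty payload carry the hash fields
                Object sha256 = packet.get(HashPayloadPacket.PAYLOAD_SHA256_HASH);
                if (sha256 != null)
                    System.out.println(sha256);
            }
        }
    }
}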
HttpPcapReader
package net.ripe.hadoop.pcap;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.LinkedList;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.Header;
import org.apache.http.HttpClientConnection;
import org.apache.http.HttpException;
import org.apache.http.HttpRequest;
import org.apache.http.HttpRequestFactory;
import org.apache.http.HttpResponse;
import org.apache.http.HttpResponseFactory;
import org.apache.http.impl.DefaultHttpRequestFactory;
import org.apache.http.impl.DefaultHttpResponseFactory;
import org.apache.http.impl.conn.DefaultClientConnection;
import org.apache.http.impl.io.AbstractSessionInputBuffer;
import org.apache.http.impl.io.AbstractSessionOutputBuffer;
import org.apache.http.impl.io.DefaultHttpRequestParser;
import org.apache.http.impl.io.DefaultHttpResponseParser;
import org.apache.http.io.HttpMessageParser;
import org.apache.http.io.SessionInputBuffer;
import org.apache.http.io.SessionOutputBuffer;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpParams;

import com.google.common.base.Joiner;

import net.ripe.hadoop.pcap.packet.HttpPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class HttpPcapReader extends PcapReader {
    public static final Log LOG = LogFactory.getLog(HttpPcapReader.class);

    public static final int HTTP_PORT = 80;
    public static final String HEADER_PREFIX = "header_";

    private HttpParams params = new BasicHttpParams();
    private HttpRequestFactory reqFactory = new DefaultHttpRequestFactory();
    private HttpResponseFactory respFactory = new DefaultHttpResponseFactory();

    public HttpPcapReader(DataInputStream is) throws IOException {
        super(is);
    }

    @Override
    protected Packet createPacket() {
        return new HttpPacket();
    }

    @Override
    protected boolean isReassemble() {
        return true;
    }

    @Override
    protected boolean isPush() {
        return false;
    }

    @Override
    protected void processPacketPayload(Packet packet, final byte[] payload) {
        HttpPacket httpPacket = (HttpPacket)packet;
        Integer srcPort = (Integer)packet.get(Packet.SRC_PORT);
        Integer dstPort = (Integer)packet.get(Packet.DST_PORT);
        if ((HTTP_PORT == srcPort || HTTP_PORT == dstPort) &&
            packet.containsKey(Packet.REASSEMBLED_FRAGMENTS) &&
            PROTOCOL_TCP.equals(packet.get(Packet.PROTOCOL))) {
            final SessionInputBuffer inBuf = new AbstractSessionInputBuffer() {
                {
                    init(new ByteArrayInputStream(payload), 1024, params);
                }

                @Override
                public boolean isDataAvailable(int timeout) throws IOException {
                    return true;
                }
            };
            final SessionOutputBuffer outBuf = new AbstractSessionOutputBuffer() {};

            if (HTTP_PORT == srcPort) {
                HttpMessageParser<HttpResponse> parser = new DefaultHttpResponseParser(inBuf, null, respFactory, params);
                HttpClientConnection conn = new DefaultClientConnection() {
                    {
                        init(inBuf, outBuf, params);
                    }

                    @Override
                    protected void assertNotOpen() {}

                    @Override
                    protected void assertOpen() {}
                };

                try {
                    HttpResponse response = parser.parse();
                    conn.receiveResponseEntity(response);
                    propagateHeaders(httpPacket, response.getAllHeaders());
                } catch (IOException e) {
                    LOG.error("IOException when decoding HTTP response", e);
                } catch (HttpException e) {
                    LOG.error("HttpException when decoding HTTP response", e);
                }
            } else if (HTTP_PORT == dstPort) {
                HttpMessageParser<HttpRequest> parser = new DefaultHttpRequestParser(inBuf, null, reqFactory, params);
                try {
                    HttpRequest request = parser.parse();
                    propagateHeaders(httpPacket, request.getAllHeaders());
                } catch (IOException e) {
                    LOG.error("IOException when decoding HTTP request", e);
                } catch (HttpException e) {
                    LOG.error("HttpException when decoding HTTP request", e);
                }
            }
        }
    }
    private void propagateHeaders(HttpPacket packet, Header[] headers) {
        LinkedList<String> headerKeys = new LinkedList<String>();
        for (Header header : headers) {
            String headerKey = HEADER_PREFIX + header.getName().toLowerCase();
            headerKeys.add(headerKey); // collect the key so the joined header list below is not empty
            packet.put(headerKey, header.getValue());
        }
        packet.put(HttpPacket.HTTP_HEADERS, Joiner.on(',').join(headerKeys));
    }
}
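After processing, each HTTP header is stored under its HEADER_PREFIX + lower-cased name, and HttpPacket.HTTP_HEADERS carries the comma-joined list of those keys. A sketch of reading them back out, under the same Iterable&lt;Packet&gt; assumption as the driver above (class name HttpHeaderDump is illustrative):

import java.io.DataInputStream;
import java.io.FileInputStream;

import net.ripe.hadoop.pcap.HttpPcapReader;
import net.ripe.hadoop.pcap.packet.Packet;

public class HttpHeaderDump {
    public static void main(String[] args) throws Exception {
        try (DataInputStream is = new DataInputStream(new FileInputStream(args[0]))) {
            for (Packet packet : new HttpPcapReader(is)) {
                // Key is HEADER_PREFIX ("header_") plus the lower-cased header name
                Object ua = packet.get("header_user-agent");
                if (ua != null)
                    System.out.println("User-Agent: " + ua);
            }
        }
    }
}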
DnsPcapReader
package net.ripe.hadoop.pcap;

import java.io.DataInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import net.ripe.hadoop.pcap.packet.DnsPacket;
import net.ripe.hadoop.pcap.packet.Packet;

import org.xbill.DNS.Flags;
import org.xbill.DNS.Header;
import org.xbill.DNS.Message;
import org.xbill.DNS.Opcode;
import org.xbill.DNS.Rcode;
import org.xbill.DNS.Record;
import org.xbill.DNS.Section;

public class DnsPcapReader extends PcapReader {
    public static final int DNS_PORT = 53;

    public DnsPcapReader(DataInputStream is) throws IOException {
        super(is);
    }

    @Override
    protected Packet createPacket() {
        return new DnsPacket();
    }

    @Override
    protected boolean isReassemble() {
        return true;
    }

    @Override
    protected boolean isPush() {
        return false;
    }

    @Override
    protected void processPacketPayload(Packet packet, byte[] payload) {
        DnsPacket dnsPacket = (DnsPacket)packet;

        if (DNS_PORT == (Integer)packet.get(Packet.SRC_PORT) || DNS_PORT == (Integer)packet.get(Packet.DST_PORT)) {
            if (PROTOCOL_TCP.equals(packet.get(Packet.PROTOCOL)) &&
                payload.length > 2) // TODO Support DNS responses with multiple messages (as used for XFRs)
                payload = Arrays.copyOfRange(payload, 2, payload.length); // First two bytes denote the size of the DNS message, ignore them

            try {
                Message msg = new Message(payload);
                Header header = msg.getHeader();
                dnsPacket.put(DnsPacket.QUERYID, header.getID());
                dnsPacket.put(DnsPacket.FLAGS, header.printFlags());
                dnsPacket.put(DnsPacket.QR, header.getFlag(Flags.QR));
                dnsPacket.put(DnsPacket.OPCODE, Opcode.string(header.getOpcode()));
                dnsPacket.put(DnsPacket.RCODE, Rcode.string(header.getRcode()));
                dnsPacket.put(DnsPacket.QUESTION, convertRecordToString(msg.getQuestion()));
                dnsPacket.put(DnsPacket.QNAME, convertRecordOwnerToString(msg.getQuestion()));
                dnsPacket.put(DnsPacket.QTYPE, convertRecordTypeToInt(msg.getQuestion()));
                dnsPacket.put(DnsPacket.ANSWER, convertRecordsToStrings(msg.getSectionArray(Section.ANSWER)));
                dnsPacket.put(DnsPacket.AUTHORITY, convertRecordsToStrings(msg.getSectionArray(Section.AUTHORITY)));
                dnsPacket.put(DnsPacket.ADDITIONAL, convertRecordsToStrings(msg.getSectionArray(Section.ADDITIONAL)));
            } catch (Exception e) {
                // If we cannot decode a DNS packet we ignore it
            }
        }
    }

    private String convertRecordToString(Record record) {
        if (record == null)
            return null;

        String recordString = record.toString();
        recordString = normalizeRecordString(recordString);
        return recordString;
    }

    private String convertRecordOwnerToString(Record record) {
        if (record == null)
            return null;

        String ownerString = record.getName().toString();
        ownerString = ownerString.toLowerCase();
        return ownerString;
    }

    private int convertRecordTypeToInt(Record record) {
        if (record == null)
            return -1;

        return record.getType();
    }

    private List<String> convertRecordsToStrings(Record[] records) {
        if (records == null)
            return null;

        ArrayList<String> retVal = new ArrayList<String>(records.length);
        for (Record record : records)
            retVal.add(convertRecordToString(record));
        return retVal;
    }

    protected String normalizeRecordString(String recordString) {
        if (recordString == null)
            return null;

        // Reduce everything that is more than one whitespace to a single whitespace
        recordString = recordString.replaceAll("\\s{2,}", " ");
        // Replace tabs with a single whitespace
        recordString = recordString.replaceAll("\\t{1,}", " ");
        return recordString;
    }
}
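And a matching sketch for the DNS reader, dumping the question and answer section of each decoded message (same Iterable&lt;Packet&gt; assumption as above; the class name DnsDump is illustrative):

import java.io.DataInputStream;
import java.io.FileInputStream;

import net.ripe.hadoop.pcap.DnsPcapReader;
import net.ripe.hadoop.pcap.packet.DnsPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class DnsDump {
    public static void main(String[] args) throws Exception {
        try (DataInputStream is = new DataInputStream(new FileInputStream(args[0]))) {
            for (Packet packet : new DnsPcapReader(is)) {
                // Fields are only present for packets that parsed as DNS
                Object qname = packet.get(DnsPacket.QNAME);
                if (qname != null)
                    System.out.println(qname + " " + packet.get(DnsPacket.QTYPE)
                            + " -> " + packet.get(DnsPacket.ANSWER));
            }
        }
    }
}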