package net.ripe.hadoop.pcap;

import java.io.DataInputStream;
import java.io.IOException;

import com.google.common.hash.Hashing;

import net.ripe.hadoop.pcap.packet.HashPayloadPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class HashPayloadPcapReader extends PcapReader {
    public HashPayloadPcapReader(DataInputStream is) throws IOException {
        super(is);
    }

    @Override
    protected Packet createPacket() {
        return new HashPayloadPacket();
    }

    @Override
    protected boolean isReassemble() {
        return true;
    }

    @Override
    protected boolean isPush() {
        return false;
    }

    @Override
    protected void processPacketPayload(Packet packet, byte[] payload) {
        if (payload.length > 0) {
            packet.put(HashPayloadPacket.PAYLOAD_SHA1_HASH, Hashing.sha1().hashBytes(payload).toString());
            packet.put(HashPayloadPacket.PAYLOAD_SHA256_HASH, Hashing.sha256().hashBytes(payload).toString());
            packet.put(HashPayloadPacket.PAYLOAD_SHA512_HASH, Hashing.sha512().hashBytes(payload).toString());
            packet.put(HashPayloadPacket.PAYLOAD_MD5_HASH, Hashing.md5().hashBytes(payload).toString());
        }
    }
}
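HashPayloadPcapReader reassembles TCP streams and records MD5, SHA-1, SHA-256 and SHA-512 digests of every non-empty payload. As context, here is a minimal standalone driver sketch, assuming (as in the RIPE hadoop-pcap library) that PcapReader implements Iterable<Packet> and that Packet exposes map-style get(); the class name HashReaderDemo and the file capture.pcap are hypothetical:

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

import net.ripe.hadoop.pcap.packet.HashPayloadPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class HashReaderDemo {
    public static void main(String[] args) throws IOException {
        // Hypothetical libpcap-format trace
        try (DataInputStream in = new DataInputStream(new FileInputStream("capture.pcap"))) {
            HashPayloadPcapReader reader = new HashPayloadPcapReader(in);
            for (Packet packet : reader) {
                // Hash fields are only set for packets that carried a payload
                Object sha256 = packet.get(HashPayloadPacket.PAYLOAD_SHA256_HASH);
                if (sha256 != null)
                    System.out.println(packet.get(Packet.SRC_PORT) + " -> "
                        + packet.get(Packet.DST_PORT) + ": " + sha256);
            }
        }
    }
}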


HttpPcapReader

package net.ripe.hadoop.pcap;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.LinkedList;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.Header;
import org.apache.http.HttpClientConnection;
import org.apache.http.HttpException;
import org.apache.http.HttpRequest;
import org.apache.http.HttpRequestFactory;
import org.apache.http.HttpResponse;
import org.apache.http.HttpResponseFactory;
import org.apache.http.impl.DefaultHttpRequestFactory;
import org.apache.http.impl.DefaultHttpResponseFactory;
import org.apache.http.impl.conn.DefaultClientConnection;
import org.apache.http.impl.io.AbstractSessionInputBuffer;
import org.apache.http.impl.io.AbstractSessionOutputBuffer;
import org.apache.http.impl.io.DefaultHttpRequestParser;
import org.apache.http.impl.io.DefaultHttpResponseParser;
import org.apache.http.io.HttpMessageParser;
import org.apache.http.io.SessionInputBuffer;
import org.apache.http.io.SessionOutputBuffer;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpParams;

import com.google.common.base.Joiner;

import net.ripe.hadoop.pcap.packet.HttpPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class HttpPcapReader extends PcapReader {
    public static final Log LOG = LogFactory.getLog(HttpPcapReader.class);

    public static final int HTTP_PORT = 80;
    public static final String HEADER_PREFIX = "header_";

    private HttpParams params = new BasicHttpParams();
    private HttpRequestFactory reqFactory = new DefaultHttpRequestFactory();
    private HttpResponseFactory respFactory = new DefaultHttpResponseFactory();

    public HttpPcapReader(DataInputStream is) throws IOException {
        super(is);
    }

    @Override
    protected Packet createPacket() {
        return new HttpPacket();
    }

    @Override
    protected boolean isReassemble() {
        return true;
    }

    @Override
    protected boolean isPush() {
        return false;
    }

    @Override
    protected void processPacketPayload(Packet packet, final byte[] payload) {
        HttpPacket httpPacket = (HttpPacket)packet;
        Integer srcPort = (Integer)packet.get(Packet.SRC_PORT);
        Integer dstPort = (Integer)packet.get(Packet.DST_PORT);
        if ((HTTP_PORT == srcPort || HTTP_PORT == dstPort) &&
            packet.containsKey(Packet.REASSEMBLED_FRAGMENTS) &&
            PROTOCOL_TCP.equals(packet.get(Packet.PROTOCOL))) {
            final SessionInputBuffer inBuf = new AbstractSessionInputBuffer() {
                {
                    init(new ByteArrayInputStream(payload), 1024, params);
                }

                @Override
                public boolean isDataAvailable(int timeout) throws IOException {
                    return true;
                }
            };
            final SessionOutputBuffer outBuf = new AbstractSessionOutputBuffer() {};

            if (HTTP_PORT == srcPort) {
                HttpMessageParser<HttpResponse> parser = new DefaultHttpResponseParser(inBuf, null, respFactory, params);
                HttpClientConnection conn = new DefaultClientConnection() {
                    {
                        init(inBuf, outBuf, params);
                    }

                    @Override
                    protected void assertNotOpen() {}

                    @Override
                    protected void assertOpen() {}
                };
                try {
                    HttpResponse response = parser.parse();
                    conn.receiveResponseEntity(response);
                    propagateHeaders(httpPacket, response.getAllHeaders());
                } catch (IOException e) {
                    LOG.error("IOException when decoding HTTP response", e);
                } catch (HttpException e) {
                    LOG.error("HttpException when decoding HTTP response", e);
                }
            } else if (HTTP_PORT == dstPort) {
                HttpMessageParser<HttpRequest> parser = new DefaultHttpRequestParser(inBuf, null, reqFactory, params);
                try {
                    HttpRequest request = parser.parse();
                    propagateHeaders(httpPacket, request.getAllHeaders());
                } catch (IOException e) {
                    LOG.error("IOException when decoding HTTP request", e);
                } catch (HttpException e) {
                    LOG.error("HttpException when decoding HTTP request", e);
                }
            }
        }
    }

    private void propagateHeaders(HttpPacket packet, Header[] headers) {
        LinkedList<String> headerKeys = new LinkedList<String>();
        for (Header header : headers) {
            String headerKey = HEADER_PREFIX + header.getName().toLowerCase();
            headerKeys.add(headerKey);
            packet.put(headerKey, header.getValue());
        }
        packet.put(HttpPacket.HTTP_HEADERS, Joiner.on(',').join(headerKeys));
    }
}
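HttpPcapReader only attempts to parse flows where one side uses port 80, the fragments were reassembled, and the transport is TCP; each parsed header is stored under HEADER_PREFIX plus the lowercased header name, and the list of those keys is joined into HttpPacket.HTTP_HEADERS. A usage sketch under the same Iterable<Packet> assumption as above (http.pcap and the User-Agent lookup are illustrative):

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

import net.ripe.hadoop.pcap.packet.HttpPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class HttpReaderDemo {
    public static void main(String[] args) throws IOException {
        // Hypothetical trace containing port-80 traffic
        try (DataInputStream in = new DataInputStream(new FileInputStream("http.pcap"))) {
            HttpPcapReader reader = new HttpPcapReader(in);
            for (Packet packet : reader) {
                Object headerKeys = packet.get(HttpPacket.HTTP_HEADERS);
                if (headerKeys != null) {
                    // Individual headers live under "header_" + lowercased name
                    System.out.println("seen headers: " + headerKeys);
                    System.out.println("user-agent: " + packet.get("header_user-agent"));
                }
            }
        }
    }
}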

DnsPcapReader

package net.ripe.hadoop.pcap;

import java.io.DataInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import net.ripe.hadoop.pcap.packet.DnsPacket;
import net.ripe.hadoop.pcap.packet.Packet;

import org.xbill.DNS.Flags;
import org.xbill.DNS.Header;
import org.xbill.DNS.Message;
import org.xbill.DNS.Opcode;
import org.xbill.DNS.Rcode;
import org.xbill.DNS.Record;
import org.xbill.DNS.Section;

public class DnsPcapReader extends PcapReader {
    public static final int DNS_PORT = 53;

    public DnsPcapReader(DataInputStream is) throws IOException {
        super(is);
    }

    @Override
    protected Packet createPacket() {
        return new DnsPacket();
    }

    @Override
    protected boolean isReassemble() {
        return true;
    }

    @Override
    protected boolean isPush() {
        return false;
    }

    @Override
    protected void processPacketPayload(Packet packet, byte[] payload) {
        DnsPacket dnsPacket = (DnsPacket)packet;

        if (DNS_PORT == (Integer)packet.get(Packet.SRC_PORT) || DNS_PORT == (Integer)packet.get(Packet.DST_PORT)) {
            // TODO Support DNS responses with multiple messages (as used for XFRs)
            if (PROTOCOL_TCP.equals(packet.get(Packet.PROTOCOL)) && payload.length > 2)
                payload = Arrays.copyOfRange(payload, 2, payload.length); // First two bytes denote the size of the DNS message, ignore them

            try {
                Message msg = new Message(payload);
                Header header = msg.getHeader();
                dnsPacket.put(DnsPacket.QUERYID, header.getID());
                dnsPacket.put(DnsPacket.FLAGS, header.printFlags());
                dnsPacket.put(DnsPacket.QR, header.getFlag(Flags.QR));
                dnsPacket.put(DnsPacket.OPCODE, Opcode.string(header.getOpcode()));
                dnsPacket.put(DnsPacket.RCODE, Rcode.string(header.getRcode()));
                dnsPacket.put(DnsPacket.QUESTION, convertRecordToString(msg.getQuestion()));
                dnsPacket.put(DnsPacket.QNAME, convertRecordOwnerToString(msg.getQuestion()));
                dnsPacket.put(DnsPacket.QTYPE, convertRecordTypeToInt(msg.getQuestion()));
                dnsPacket.put(DnsPacket.ANSWER, convertRecordsToStrings(msg.getSectionArray(Section.ANSWER)));
                dnsPacket.put(DnsPacket.AUTHORITY, convertRecordsToStrings(msg.getSectionArray(Section.AUTHORITY)));
                dnsPacket.put(DnsPacket.ADDITIONAL, convertRecordsToStrings(msg.getSectionArray(Section.ADDITIONAL)));
            } catch (Exception e) {
                // If we cannot decode a DNS packet we ignore it
            }
        }
    }

    private String convertRecordToString(Record record) {
        if (record == null)
            return null;
        String recordString = record.toString();
        recordString = normalizeRecordString(recordString);
        return recordString;
    }

    private String convertRecordOwnerToString(Record record) {
        if (record == null)
            return null;
        String ownerString = record.getName().toString();
        ownerString = ownerString.toLowerCase();
        return ownerString;
    }

    private int convertRecordTypeToInt(Record record) {
        if (record == null)
            return -1;
        return record.getType();
    }

    private List<String> convertRecordsToStrings(Record[] records) {
        if (records == null)
            return null;
        ArrayList<String> retVal = new ArrayList<String>(records.length);
        for (Record record : records)
            retVal.add(convertRecordToString(record));
        return retVal;
    }

    protected String normalizeRecordString(String recordString) {
        if (recordString == null)
            return null;
        // Reduce everything that is more than one whitespace to a single whitespace
        recordString = recordString.replaceAll("\\s{2,}", " ");
        // Replace tabs with a single whitespace
        recordString = recordString.replaceAll("\\t{1,}", " ");
        return recordString;
    }
}
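DnsPcapReader strips the two-byte length prefix from DNS-over-TCP payloads, decodes the message with dnsjava, and stores the header flags plus question, answer, authority and additional sections on the DnsPacket. A sketch of reading those fields back, under the same Iterable<Packet> assumption as the earlier examples (dns.pcap is a placeholder):

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

import net.ripe.hadoop.pcap.packet.DnsPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class DnsReaderDemo {
    public static void main(String[] args) throws IOException {
        // Placeholder capture of port-53 traffic
        try (DataInputStream in = new DataInputStream(new FileInputStream("dns.pcap"))) {
            DnsPcapReader reader = new DnsPcapReader(in);
            for (Packet packet : reader) {
                Object qname = packet.get(DnsPacket.QNAME);
                if (qname != null)
                    // e.g. "www.example.com. 1 -> NOERROR" for a successful A query
                    System.out.println(qname + " " + packet.get(DnsPacket.QTYPE)
                        + " -> " + packet.get(DnsPacket.RCODE));
            }
        }
    }
}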
