import scapy.all as scapy
import sys
import os
import socket
import csv
import time
import queue
import threading
from tqdm import tqdm
"""
python pcap_aa.py ./pcap 12.35.45.84:80
"""
def monitor(all_length, q, spe_time=0.5):
    """Poll the work queue and drive a tqdm progress bar until it drains.

    :param all_length: total number of items that were enqueued at start
    :param q: the queue being consumed by the worker threads
    :param spe_time: polling interval in seconds
    """
    try:
        bar = tqdm(total=all_length)
        try:
            done = 0
            while True:
                finished = all_length - q.qsize()
                bar.update(finished - done)
                done = finished
                time.sleep(spe_time)
                if q.empty():
                    # push the bar to 100% before leaving
                    bar.update(all_length - done)
                    break
        finally:
            bar.close()  # original leaked the bar; close() finalizes the terminal line
    except Exception:
        # best-effort monitor: a broken progress bar must not kill the run
        # (narrowed from a bare except, which also swallowed KeyboardInterrupt)
        pass
class ThreadNum(threading.Thread):
    """Worker thread that drains raw_queue, replaying each payload via sender()."""

    def __init__(self, raw_queue, msg_queue, server_ip, server_port):
        threading.Thread.__init__(self)
        self.raw_queue = raw_queue      # queue of [pcap_path, num, raw] work items
        self.msg_queue = msg_queue      # queue collecting completed result records
        self.server_ip = server_ip
        self.server_port = server_port

    def run(self):
        # Drain the shared queue until it is empty.
        while True:
            try:
                # get_nowait() avoids the empty()-then-get() race of the original,
                # where another worker could steal the last item between the two
                # calls and leave this thread blocked forever on get().
                info = self.raw_queue.get_nowait()
            except queue.Empty:
                break
            # BUG FIX: the original passed the module-level globals server_ip /
            # server_port here instead of the values stored on this instance.
            sender(info, self.msg_queue, self.server_ip, self.server_port)
def sender(info, msg_queue, server_ip, server_port):
    """Send one raw payload to the server and record the reply (or the error).

    :param info: mutable list [pcap_path, num, raw]; a 4th element is appended:
                 the first <=4096 bytes of the server's reply, or a string
                 'Exception: ...' describing the failure
    :param msg_queue: queue onto which the completed record is pushed
    :param server_ip: target server address
    :param server_port: target server TCP port
    """
    filename, num, raw = info
    try:
        # `with` guarantees the socket is closed (the original leaked it), and
        # the timeout is set BEFORE connect() so a dead host cannot hang the
        # worker indefinitely (the original set it only after connecting).
        with socket.socket() as s:
            s.settimeout(5)
            s.connect((server_ip, server_port))
            s.sendall(raw)   # sendall: plain send() may transmit only part of raw
            data = s.recv(4096)
        info.append(data)
    except Exception as e:
        info.append('Exception: ' + str(e))
    msg_queue.put(info)
def main(filepath, server_ip, server_port):
    """Parse every pcap under filepath, replay each payload to the server,
    and write a timestamped CSV report of the per-payload results.

    :param filepath: directory walked recursively for .pcap files
    :param server_ip: target server address
    :param server_port: target server TCP port
    """
    THREAD_NUM = 10
    # Collect every pcap file and enqueue all extracted payloads.
    raw_queue = queue.Queue()
    msg_queue = queue.Queue()
    for _file in get_all_files(filepath):
        analysis_pcap(_file, raw_queue)
    # Never spawn more workers than there are work items.
    if raw_queue.qsize() < THREAD_NUM:
        THREAD_NUM = raw_queue.qsize()
    length = raw_queue.qsize()
    try:
        # daemon=True: the monitor must never keep the process alive on its own
        threading.Thread(target=monitor, args=(length, raw_queue), daemon=True).start()
    except Exception:
        print('监控进程挂了。。。')
    workers = []
    for _ in range(THREAD_NUM):
        t = ThreadNum(raw_queue, msg_queue, server_ip, server_port)
        t.daemon = True  # modern spelling of the deprecated setDaemon(True)
        workers.append(t)
    for t in workers:
        t.start()
        time.sleep(0.1)  # stagger thread start-up slightly
    for t in workers:
        t.join()
    headers = ['pcap', 'num', 'raw', 'result']
    # Group results by source pcap so all rows for one file end up adjacent
    # in the report (workers finish streams in arbitrary order).
    grouped = {}
    while not msg_queue.empty():
        pcap, num, raw, result = msg_queue.get()
        grouped.setdefault(pcap, []).append([num, raw, result])
    output_filename = time.strftime('%Y%m%d_%Hh%Mm%Ss_report.csv', time.localtime(time.time()))
    print('[+] 正在输出文件:%s ' % output_filename)
    # `with` guarantees the report is closed/flushed even if a write raises
    # (the original leaked the handle on exception); utf-8 keeps non-ASCII
    # exception text writable regardless of the platform's locale encoding.
    with open(output_filename, 'w', newline='', encoding='utf-8') as f:
        f_csv = csv.writer(f)
        f_csv.writerow(headers)
        for pcap, infos in grouped.items():
            for num, raw, result in infos:
                try:
                    f_csv.writerow([pcap, num, raw, result])
                except Exception:
                    # fall back so one bad row cannot abort the whole report
                    f_csv.writerow([str(pcap), str(num), 'write file exception', 'write file exception'])
def get_all_files(filepath):
    """Recursively collect every file under filepath whose name contains '.pcap'.

    :param filepath: root directory to walk (symlinks are followed)
    :return: list of full paths to the matching files
    """
    matches = []
    for root, _dirs, names in os.walk(filepath, followlinks=True):
        # substring match (not endswith) so e.g. '.pcapng' files are kept too
        matches.extend(os.path.join(root, name) for name in names if '.pcap' in name)
    return matches
def analysis_pcap(pcap_path, raw_queue):
    """Parse one pcap file and enqueue every non-empty packet payload.

    Each queued item is [pcap_path, index, raw_bytes], where index counts only
    the payloads actually enqueued from this file. On parse failure a single
    marker record [pcap_path, 0, 'Pcap Analysis Exception'] is queued instead
    so the failure still shows up in the final report.

    :param pcap_path: path of the pcap file to parse
    :param raw_queue: queue receiving the work items
    """
    count = 0
    try:
        packets = scapy.rdpcap(pcap_path)
    except Exception:  # narrowed from a bare except:, which also caught SystemExit
        raw_queue.put([pcap_path, 0, 'Pcap Analysis Exception'])
        return
    for pkt in packets:
        try:
            raw = pkt.load
            # Payloads that are all NUL bytes are assumed to be TCP-handshake
            # filler — skip them (count is NOT advanced for skipped packets).
            if not raw.replace(b'\x00', b''):
                continue
            raw_queue.put([pcap_path, count, raw])
        except Exception:
            # No .load attribute means this packet carries no payload; move on.
            continue
        count += 1
if __name__ == '__main__':
    msg = '''
功能: 将pcap包中的tcp流按顺序提取出来,并发送到特定的机器上
注意点:
1. 会将所有tcp流进行发送,每一个tcp都是一个新的请求(如果利用需要或检测需要上下文,那么则不支持)
2. 一个pcap可能有多条流,不论是请求还是响应 或者是其他类型的数据,都会发送,且被reset后,不影响这个pcap包的其他流
3. 会输出csv文件,里面记录了每一条流的结果,可供分析
4. 由于多线程是按流进行,所以最终csv排序,单一pcap的流会乱序
5. 跑太多文件时需要注意,由于读数据、写csv都是内存操作,太大量可能会导致崩溃
Usage: python3 %s FILEPATH SERVER_IP SERVER_PORT
Example:
python3 %s d:/pcap/ 10.100.12.1 8001 // 将d:/pcap里的所有pcap包解析并发送到10.100.12.1:8001
Requirement:
python3.+
scapy
tqdm
''' % (sys.argv[0], sys.argv[0])
    if len(sys.argv) < 4:
        print(msg)
        # sys.exit is always available; the exit() builtin is injected by the
        # site module and is not guaranteed to exist (e.g. under python -S)
        sys.exit(1)
    filepath = sys.argv[1]
    server_ip = sys.argv[2]
    server_port = int(sys.argv[3])
    main(filepath, server_ip, server_port)
# --- scraped blog-page footer below; commented out because these bare
# --- lines are not Python and would crash the script at import time
# 向服务器发送pcap包
# 猜你喜欢
# 转载自blog.csdn.net/qq_39306128/article/details/123050490
# 今日推荐
# 周排行