Computing a packet flow summary from a pcap file using dictionaries in Python 3


This is what I currently extract from the pcap file:

import datetime
import time
import sys
import dpkt
import socket

def inet_to_str(inet):
    """Convert inet object to a string

        Args:
            inet (inet struct): inet network address
        Returns:
            str: Printable/readable IP address
    """
    # First try ipv4 and then ipv6
    try:
        return socket.inet_ntop(socket.AF_INET, inet)
    except ValueError:
        return socket.inet_ntop(socket.AF_INET6, inet)

def print_packet_info(ts, src_ip, src_port, dst_ip, dst_port, protocol, pkt_len, ttl):
    # the parameters are placeholders: every one of them is reassigned below
    ipcounter = 0
    totalbytes = 0

    try:
        dmp_file = sys.argv[1]
    except IndexError:
        print('Error: please supply pcap filename!\n')
        return

    with open(dmp_file, 'rb') as f:
        pcap = dpkt.pcap.Reader(f)

        for ts, buf in pcap:
            # get UTC timestamp and decode the Ethernet frame
            utc_timestamp = str(datetime.datetime.utcfromtimestamp(ts))
            eth = dpkt.ethernet.Ethernet(buf)

            if not isinstance(eth.data, dpkt.ip.IP):
                print('Non IP Packet type not supported %s\n' % eth.data.__class__.__name__)
                continue

            ip = eth.data
            ipcounter += 1

            src_ip = ip.src
            dst_ip = ip.dst
            pkt_len = ip.len
            totalbytes += pkt_len
            ttl = ip.ttl

            # map the IP protocol number to a label; skip anything that is
            # neither TCP nor UDP, since it carries no ports to report
            protocol_number = ip.p
            if protocol_number == 17:
                protocol = 'udp'
            elif protocol_number == 6:
                protocol = 'tcp'
            else:
                continue

            transport = ip.data
            src_port = transport.sport
            dst_port = transport.dport

            # create log line
            log_line = '[%s] - %s:%s -> %s:%s (%s, len=%d, ttl=%d)' % \
                (utc_timestamp, inet_to_str(src_ip), src_port,
                 inet_to_str(dst_ip), dst_port, protocol, pkt_len, ttl)
            print(log_line)

    average = totalbytes / ipcounter if ipcounter else 0
    print('\n------------------- SUMMARY -------------------\n')
    print('Total packets:', ipcounter)
    print('Total bytes (bytes):', totalbytes)
    print('Average packet size (bytes):', int(average))
    print('\n-----------------------------------------------')

def run_example():
    # dummy values; print_packet_info reassigns all of them while parsing
    ts = time.time()
    src_ip = '1.2.3.4'
    src_port = 99
    dst_ip = '5.6.7.8'
    dst_port = 199
    protocol = "tcp"
    pkt_len = 1000
    ttl = 64

    print_packet_info(ts, src_ip, src_port, dst_ip, dst_port, protocol, pkt_len, ttl)

if __name__ == '__main__':
    run_example()
So now I am supposed to create a flow summary of the packets. The program must be able to group packets into flows, where each flow records the total number of packets, the total bytes transferred, the start time, and the protocol used (TCP or UDP), along with the source and destination IPs and ports. I tried to use a dictionary, but since I am new to Python I found it hard to implement.

Any help would be greatly appreciated.
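
One way to structure this (a minimal sketch building on the script above, not from the original post): key a dictionary on the (src_ip, src_port, dst_ip, dst_port, protocol) 5-tuple and accumulate the per-flow counters under that key. The record_packet and print_flow_summary helpers and the per-flow field names are illustrative assumptions:

# Sketch of dictionary-based flow grouping; `flows`, `record_packet`,
# `print_flow_summary` and the field names are illustrative assumptions.
flows = {}

def record_packet(ts, src_ip, src_port, dst_ip, dst_port, protocol, pkt_len):
    # one entry per (src_ip, src_port, dst_ip, dst_port, protocol) 5-tuple
    key = (src_ip, src_port, dst_ip, dst_port, protocol)
    if key not in flows:
        # first packet of this flow: remember its start time
        flows[key] = {'packets': 0, 'bytes': 0, 'start': ts}
    flows[key]['packets'] += 1
    flows[key]['bytes'] += pkt_len

def print_flow_summary():
    for (src_ip, src_port, dst_ip, dst_port, protocol), stats in flows.items():
        print('%s:%s -> %s:%s (%s) packets=%d bytes=%d start=%s' % (
            inet_to_str(src_ip), src_port, inet_to_str(dst_ip), dst_port,
            protocol, stats['packets'], stats['bytes'],
            datetime.datetime.utcfromtimestamp(stats['start'])))

Calling record_packet(ts, src_ip, src_port, dst_ip, dst_port, protocol, pkt_len) at the end of the per-packet loop and print_flow_summary() after the loop finishes would print one line per flow. Note that this keys each direction separately; sorting the two endpoints inside the key would merge a connection's two directions into one flow.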