--- pktgen.c (2ade0c1d9d93b7642212657ef76f4a1e30233711)
+++ pktgen.c (a8d764b9832d3cc86019f71916665dd2d337d7c2)
 /*
  * Authors:
  * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
  *    Uppsala University and
  *    Swedish University of Agricultural Sciences
  *
  * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
  * Ben Greear <greearb@candelatech.com>
[... 364 unchanged lines hidden ...]
         struct flow_state *flows;
         unsigned cflows;        /* Concurrent flows (config) */
         unsigned lflow;         /* Flow length (config) */
         unsigned nflows;        /* accumulated flows (stats) */
         unsigned curfl;         /* current sequenced flow (state)*/

         u16 queue_map_min;
         u16 queue_map_max;
+        __u32 skb_priority;    /* skb priority field */
         int node;               /* Memory node */

 #ifdef CONFIG_XFRM
         __u8 ipsmode;           /* IPSEC mode (config) */
         __u8 ipsproto;          /* IPSEC type (config) */
 #endif
         char result[512];
 };

 struct pktgen_hdr {
         __be32 pgh_magic;
         __be32 seq_num;
         __be32 tv_sec;
         __be32 tv_usec;
 };

+static bool pktgen_exiting __read_mostly;
+
 struct pktgen_thread {
         spinlock_t if_lock;             /* for list of devices */
         struct list_head if_list;       /* All device here */
         struct list_head th_list;
         struct task_struct *tsk;
         char result[512];

         /* Field for thread to receive "posted" events terminate,
[... 137 unchanged lines hidden ...]
         seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
                    pkt_dev->lflow);

         seq_printf(seq,
                    " queue_map_min: %u queue_map_max: %u\n",
                    pkt_dev->queue_map_min,
                    pkt_dev->queue_map_max);

+        if (pkt_dev->skb_priority)
+                seq_printf(seq, " skb_priority: %u\n",
+                           pkt_dev->skb_priority);
+
         if (pkt_dev->flags & F_IPV6) {
                 char b1[128], b2[128], b3[128];
                 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
                 fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr);
                 fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr);
                 seq_printf(seq,
                            " saddr: %s min_saddr: %s max_saddr: %s\n", b1,
                            b2, b3);
[... 1148 unchanged lines hidden ...]
                         pkt_dev->traffic_class = tmp_value;
                         sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class);
                 } else {
                         sprintf(pg_result, "ERROR: traffic_class must be 00-ff");
                 }
                 return count;
         }

+        if (!strcmp(name, "skb_priority")) {
+                len = num_arg(&user_buffer[i], 9, &value);
+                if (len < 0)
+                        return len;
+
+                i += len;
+                pkt_dev->skb_priority = value;
+                sprintf(pg_result, "OK: skb_priority=%i",
+                        pkt_dev->skb_priority);
+                return count;
+        }
+
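Note: the new skb_priority parameter is set through pktgen's per-device procfs file, like the other knobs handled by pktgen_if_write(). A minimal userspace sketch follows; the /proc/net/pktgen/eth0 path assumes eth0 has already been added to one of the kpktgend threads, and the device name and value are illustrative only.

```c
/*
 * Sketch: configure the new skb_priority knob from userspace.
 * The proc path and device name are assumptions; the parameter name and
 * the "OK: skb_priority=..." result string come from the hunk above.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/net/pktgen/eth0", "w");  /* assumed device */

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* Parsed by the skb_priority branch added to pktgen_if_write() */
        fputs("skb_priority 3\n", f);
        fclose(f);
        return 0;
}
```

Reading the same proc file back should then show the value via the new seq_printf() line in pktgen_if_show(), and the Result line should report "OK: skb_priority=3".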
         sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
         return -EINVAL;
 }

 static int pktgen_if_open(struct inode *inode, struct file *file)
 {
         return single_open(file, pktgen_if_show, PDE(inode)->data);
 }
[... 914 unchanged lines hidden ...]
         skb = __netdev_alloc_skb(odev,
                                  pkt_dev->cur_pkt_size + 64
                                  + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);

         if (!skb) {
                 sprintf(pkt_dev->result, "No memory");
                 return NULL;
         }
+        prefetchw(skb->data);

         skb_reserve(skb, datalen);

         /* Reserve for ethernet and IP header */
         eth = (__u8 *) skb_push(skb, 14);
         mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32));
         if (pkt_dev->nr_labels)
                 mpls_push(mpls, pkt_dev);
[... 14 unchanged lines hidden ...]
                 vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
                 *vlan_encapsulated_proto = htons(ETH_P_IP);
         }

         skb->network_header = skb->tail;
         skb->transport_header = skb->network_header + sizeof(struct iphdr);
         skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
         skb_set_queue_mapping(skb, queue_map);
+        skb->priority = pkt_dev->skb_priority;
+
         iph = ip_hdr(skb);
         udph = udp_hdr(skb);

         memcpy(eth, pkt_dev->hh, 12);
         *(__be16 *) & eth[12] = protocol;

         /* Eth + IPh + UDPh + mpls */
         datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
[... 299 unchanged lines hidden ...]

         skb = __netdev_alloc_skb(odev,
                                  pkt_dev->cur_pkt_size + 64
                                  + 16 + pkt_dev->pkt_overhead, GFP_NOWAIT);
         if (!skb) {
                 sprintf(pkt_dev->result, "No memory");
                 return NULL;
         }
+        prefetchw(skb->data);

         skb_reserve(skb, 16);

         /* Reserve for ethernet and IP header */
         eth = (__u8 *) skb_push(skb, 14);
         mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32));
         if (pkt_dev->nr_labels)
                 mpls_push(mpls, pkt_dev);
[... 14 unchanged lines hidden ...]
                 vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
                 *vlan_encapsulated_proto = htons(ETH_P_IPV6);
         }

         skb->network_header = skb->tail;
         skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
         skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
         skb_set_queue_mapping(skb, queue_map);
+        skb->priority = pkt_dev->skb_priority;
         iph = ipv6_hdr(skb);
         udph = udp_hdr(skb);

         memcpy(eth, pkt_dev->hh, 12);
         *(__be16 *) &eth[12] = protocol;

         /* Eth + IPh + UDPh + mpls */
         datalen = pkt_dev->cur_pkt_size - 14 -
[... 399 unchanged lines hidden ...]
 }

 static void pktgen_rem_thread(struct pktgen_thread *t)
 {
         /* Remove from the thread list */

         remove_proc_entry(t->tsk->comm, pg_proc_dir);

-        mutex_lock(&pktgen_thread_lock);
-
-        list_del(&t->th_list);
-
-        mutex_unlock(&pktgen_thread_lock);
 }

 static void pktgen_resched(struct pktgen_dev *pkt_dev)
 {
         ktime_t idle_start = ktime_now();
         schedule();
         pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
 }
[... 58 unchanged lines hidden ...]
         if (pkt_dev->delay && pkt_dev->last_ok)
                 spin(pkt_dev, pkt_dev->next_tx);

         queue_map = skb_get_queue_mapping(pkt_dev->skb);
         txq = netdev_get_tx_queue(odev, queue_map);

         __netif_tx_lock_bh(txq);

-        if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
+        if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
                 ret = NETDEV_TX_BUSY;
                 pkt_dev->last_ok = 0;
                 goto unlock;
         }
         atomic_inc(&(pkt_dev->skb->users));
         ret = (*xmit)(pkt_dev->skb, odev);

         switch (ret) {
[... 55 unchanged lines hidden ...]
         set_current_state(TASK_INTERRUPTIBLE);

         set_freezable();

         while (!kthread_should_stop()) {
                 pkt_dev = next_to_run(t);

                 if (unlikely(!pkt_dev && t->control == 0)) {
+                        if (pktgen_exiting)
+                                break;
                         wait_event_interruptible_timeout(t->queue,
                                                          t->control != 0,
                                                          HZ/10);
                         try_to_freeze();
                         continue;
                 }

                 __set_current_state(TASK_RUNNING);
[... 36 unchanged lines hidden ...]
         pktgen_stop(t);

         pr_debug("%s removing all device\n", t->tsk->comm);
         pktgen_rem_all_ifs(t);

         pr_debug("%s removing thread\n", t->tsk->comm);
         pktgen_rem_thread(t);

+        /* Wait for kthread_stop */
+        while (!kthread_should_stop()) {
+                set_current_state(TASK_INTERRUPTIBLE);
+                schedule();
+        }
+        __set_current_state(TASK_RUNNING);
+
         return 0;
 }

 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
                                           const char *ifname, bool exact)
 {
         struct pktgen_dev *p, *pkt_dev = NULL;
         size_t len = strlen(ifname);
[... 258 unchanged lines hidden ...]
 }

 static void __exit pg_cleanup(void)
 {
         struct pktgen_thread *t;
         struct list_head *q, *n;

         /* Stop all interfaces & threads */
+        pktgen_exiting = true;

         list_for_each_safe(q, n, &pktgen_threads) {
                 t = list_entry(q, struct pktgen_thread, th_list);
                 kthread_stop(t->tsk);
                 kfree(t);
         }

         /* Un-register us from receiving netdevice events */
[... 22 unchanged lines hidden ...]
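Note on the tx-queue check in pktgen_xmit(): the old code tested the stopped and frozen queue states with two separate helpers, while the new netif_tx_queue_frozen_or_stopped() (defined in netdevice.h, not shown in this diff) is assumed to fold both into a single state test. A sketch of the equivalent condition, written only in terms of the two helpers visible in the removed line:

```c
#include <linux/netdevice.h>

/*
 * Sketch only: the equivalent condition, built from the two helpers the
 * removed line called. The real netif_tx_queue_frozen_or_stopped() is
 * assumed to test both per-queue state bits in one go.
 */
static inline bool pktgen_txq_busy_sketch(const struct netdev_queue *txq)
{
        return netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq);
}
```

Presumably the point of the consolidated helper is that the hot path examines the queue state once instead of making two separate checks.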
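Note on the shutdown changes: the new pktgen_exiting flag, the early break in pktgen_thread_worker(), the trailing wait-for-kthread_stop loop, and pg_cleanup() setting the flag before calling kthread_stop() together form a kthread shutdown handshake, so the worker is still parked when kthread_stop() reaps it. A minimal, self-contained sketch of the same pattern; the module and symbol names (worker_exiting, worker_fn, handshake_*) are hypothetical and not taken from pktgen.

```c
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>

static bool worker_exiting __read_mostly;       /* plays the role of pktgen_exiting */
static struct task_struct *worker;

static int worker_fn(void *data)
{
        while (!kthread_should_stop()) {
                if (worker_exiting)
                        break;                  /* stop generating work early */
                msleep_interruptible(100);      /* stand-in for the real work loop */
        }

        /* Stay alive until kthread_stop() is called, as pktgen_thread_worker()
         * now does, so the stopping side always finds the thread parked here. */
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init handshake_init(void)
{
        worker = kthread_run(worker_fn, NULL, "handshake_worker");
        return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void __exit handshake_exit(void)
{
        worker_exiting = true;  /* ask the worker to wind down, like pg_cleanup() */
        kthread_stop(worker);   /* then reap it */
}

module_init(handshake_init);
module_exit(handshake_exit);
MODULE_LICENSE("GPL");
```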