/*
 * Authors:
 * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
 *                             Uppsala University and
 *                             Swedish University of Agricultural Sciences
 *
 * Alexey Kuznetsov  <kuznet@ms2.inr.ac.ru>
 * Ben Greear <greearb@candelatech.com>
 * Jens Låås <jens.laas@data.slu.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *
 * A tool for loading the network with preconfigured packets.
 * The tool is implemented as a linux module.  Parameters are output
 * device, delay (to hard_xmit), number of packets, and whether
 * to use multiple SKBs or just the same one.
 * pktgen uses the installed interface's output routine.
 *
 * Additional hacking by:
 *
 * Jens.Laas@data.slu.se
 * Improved by ANK. 010120.
 * Improved by ANK even more. 010212.
 * MAC address typo fixed. 010417 --ro
 * Integrated.  020301 --DaveM
 * Added multiskb option 020301 --DaveM
 * Scaling of results. 020417 --sigurdur@linpro.no
 * Significant re-work of the module:
 *   * Convert to threaded model to more efficiently be able to transmit
 *     and receive on multiple interfaces at once.
 *   * Converted many counters to __u64 to allow longer runs.
 *   * Allow configuration of ranges, like min/max IP address, MACs,
 *     and UDP-ports, for both source and destination, and can
 *     set to use a random distribution or sequentially walk the range.
 *   * Can now change most values after starting.
 *   * Place a 12-byte packet in the UDP payload with magic number,
 *     sequence number, and timestamp.
 *   * Add receiver code that detects dropped pkts, re-ordered pkts, and
 *     latencies (with micro-second precision).
 *   * Add IOCTL interface to easily get counters & configuration.
 *   --Ben Greear <greearb@candelatech.com>
 *
 * Renamed multiskb to clone_skb and cleaned up sending core for two distinct
 * skb modes.  A clone_skb=0 mode for Ben's "ranges" work and a clone_skb != 0
 * mode as a "fastpath" with a configurable number of clones after each alloc.
 * clone_skb=0 means all packets are allocated; this also means ranges, time
 * stamps etc. can be used.  clone_skb=100 means 1 malloc is followed by 100
 * clones.
 *
 * Also moved to /proc/net/pktgen/
 * --ro
 *
 * Sept 10:  Fixed threading/locking.  Lots of bone-headed and more clever
 *           mistakes.  Also merged in DaveM's patch in the -pre6 patch.
 * --Ben Greear <greearb@candelatech.com>
 *
 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
 *
 *
 * 021124 Finished major redesign and rewrite for new functionality.
 * See Documentation/networking/pktgen.txt for how to use this.
 *
 * The new operation:
 * For each CPU one thread/process is created at start.  This thread checks
 * for running devices in the if_list and sends packets until count is 0.
 * The thread also checks thread->control, which is used for inter-process
 * communication; the controlling process "posts" operations to the threads
 * this way.  The if_lock should be possible to remove when add/rem_device
 * is merged into this too.
 *
 * By design there should only be *one* "controlling" process.  In practice
 * multiple write accesses give unpredictable results.  Note that a "write"
 * to /proc returns a result code that should then be read back by the
 * "writer" (see the example below).
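 * (Illustrative sketch only, following Documentation/networking/pktgen.txt
 *  and the /proc handlers implemented below:
 *
 *      echo "add_device eth0"  > /proc/net/pktgen/kpktgend_0
 *      echo "count 100000"     > /proc/net/pktgen/eth0
 *      echo "start"            > /proc/net/pktgen/pgctrl
 *      cat /proc/net/pktgen/eth0        # read back the "Result:" line
 *
 *  The per-thread file name (assumed here to be kpktgend_0) depends on how
 *  the kernel threads are named; the per-device file matches the device
 *  name passed to add_device.)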
 * For practical use this should be no problem.
 *
 * Note: when adding devices to a specific CPU it is a good idea to also
 * assign /proc/irq/XX/smp_affinity so that TX interrupts get bound to the
 * same CPU.
 * --ro
 *
 * Fix refcount off-by-one if first packet fails, potential NULL deref,
 * memleak 030710 --KJP
 *
 * First "ranges" functionality for ipv6 030726 --ro
 *
 * Included flow support. 030802 ANK.
 *
 * Fixed unaligned access on IA-64, Grant Grundler <grundler@parisc-linux.org>
 *
 * "Remove if" fix added by Harald Welte <laforge@netfilter.org> 040419
 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604
 *
 * New xmit() return, do_div and misc clean up by Stephen Hemminger
 * <shemminger@osdl.org> 040923
 *
 * Randy Dunlap fixed u64 printk compiler warning
 *
 * Remove FCS from BW calculation.  Lennert Buytenhek <buytenh@wantstofly.org>
 * New time handling.  Lennert Buytenhek <buytenh@wantstofly.org> 041213
 *
 * Corrections from Nikolai Malykh (nmalykh@bilim.com)
 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230
 *
 * interruptible_sleep_on_timeout() replaced, Nishanth Aravamudan
 * <nacc@us.ibm.com> 050103
 *
 * MPLS support by Steven Whitehouse <steve@chygwyn.com>
 *
 * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
 *
 */
#include <linux/sys.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/wait.h>
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <net/checksum.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#ifdef CONFIG_XFRM
#include <net/xfrm.h>
#endif
#include <asm/byteorder.h>
#include <linux/rcupdate.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/div64.h>		/* do_div */
#include <asm/timex.h>

#define VERSION  "pktgen v2.68: Packet Generator for packet performance testing.\n"

/* The buckets are exponential in 'width' */
#define LAT_BUCKETS_MAX 32
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16	/* This is the max label stack depth */
#define MPLS_STACK_BOTTOM htonl(0x00000100)

/* Device flag bits */
#define F_IPSRC_RND   (1<<0)	/* IP-Src Random  */
#define F_IPDST_RND   (1<<1)	/* IP-Dst Random  */
#define F_UDPSRC_RND  (1<<2)	/* UDP-Src Random */
#define F_UDPDST_RND  (1<<3)	/* UDP-Dst Random */
#define F_MACSRC_RND  (1<<4)	/* MAC-Src Random */
#define F_MACDST_RND  (1<<5)	/* MAC-Dst Random */
#define F_TXSIZE_RND  (1<<6)	/* Transmit size is random */
#define F_IPV6        (1<<7)	/* Interface in IPV6 Mode */
#define F_MPLS_RND    (1<<8)	/* Random MPLS labels */
#define F_VID_RND     (1<<9)	/* Random VLAN ID */
#define F_SVID_RND    (1<<10)	/* Random SVLAN ID */
#define F_FLOW_SEQ    (1<<11)	/* Sequential flows */
#define F_IPSEC_ON    (1<<12)	/* ipsec on for flows */

/* Thread control flag bits */
#define T_TERMINATE   (1<<0)
#define T_STOP        (1<<1)	/* Stop run */
#define T_RUN         (1<<2)	/* Start run */
#define T_REMDEVALL   (1<<3)	/* Remove all devs */
#define T_REMDEV      (1<<4)	/* Remove one dev */

/* If lock -- can be removed after some work */
#define if_lock(t)	spin_lock(&(t->if_lock));
#define if_unlock(t)	spin_unlock(&(t->if_lock));

/* Used to help with determining the pkts on receive */
#define PKTGEN_MAGIC 0xbe9be955
#define PG_PROC_DIR "pktgen"
#define PGCTRL	    "pgctrl"
static struct proc_dir_entry *pg_proc_dir = NULL;

#define MAX_CFLOWS  65536

#define VLAN_TAG_SIZE(x)  ((x)->vlan_id == 0xffff ? 0 : 4)
#define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4)

struct flow_state {
	__be32 cur_daddr;
	int count;
#ifdef CONFIG_XFRM
	struct xfrm_state *x;
#endif
	__u32 flags;
};

/* flow flag bits */
#define F_INIT   (1<<0)		/* flow has been initialized */

struct pktgen_dev {
	/*
	 * Try to keep frequent/infrequent used vars. separated.
	 */
	struct proc_dir_entry *entry;	/* proc file */
	struct pktgen_thread *pg_thread;/* the owner */
	struct list_head list;		/* Used for chaining in the thread's run-queue */

	int running;		/* if this changes to false, the test will stop */

	/* If min != max, then we will either do a linear iteration, or
	 * we will do a random selection from within the range.
	 */
	__u32 flags;
	int removal_mark;	/* non-zero => the device is marked for
				 * removal by worker thread */

	int min_pkt_size;	/* = ETH_ZLEN; */
	int max_pkt_size;	/* = ETH_ZLEN; */
	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
	int nfrags;
	__u32 delay_us;		/* Default delay */
	__u32 delay_ns;
	__u64 count;		/* Default No packets to send */
	__u64 sofar;		/* How many pkts we've sent so far */
	__u64 tx_bytes;		/* How many bytes we've transmitted */
	__u64 errors;		/* Errors when trying to transmit, pkts will be re-sent */

	/* runtime counters relating to clone_skb */
	__u64 next_tx_us;	/* timestamp of when to tx next */
	__u32 next_tx_ns;

	__u64 allocated_skbs;
	__u32 clone_count;
	int last_ok;		/* Was last skb sent?
				 * Or a failed transmit of some sort?  This will keep
				 * sequence numbers in order, for example.
				 */
	__u64 started_at;	/* micro-seconds */
	__u64 stopped_at;	/* micro-seconds */
	__u64 idle_acc;		/* micro-seconds */
	__u32 seq_num;

	int clone_skb;		/* Use multiple SKBs during packet gen.  If this number
				 * is greater than 1, then that many copies of the same
				 * packet will be sent before a new packet is allocated.
				 * For instance, if you want to send 1024 identical packets
				 * before creating a new packet, set clone_skb to 1024.
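				 * (Illustrative only: with the /proc interface
				 *  implemented below, something like
				 *      echo "clone_skb 1024" > /proc/net/pktgen/eth0
				 *  requests this behaviour for a device named eth0,
				 *  while clone_skb=0 disables cloning so per-packet
				 *  ranges and timestamps can be used, as described
				 *  in the header comment above.)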
				 */

	char dst_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char dst_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char src_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char src_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */

	struct in6_addr in6_saddr;
	struct in6_addr in6_daddr;
	struct in6_addr cur_in6_daddr;
	struct in6_addr cur_in6_saddr;
	/* For ranges */
	struct in6_addr min_in6_daddr;
	struct in6_addr max_in6_daddr;
	struct in6_addr min_in6_saddr;
	struct in6_addr max_in6_saddr;

	/* If we're doing ranges, random or incremental, then this
	 * defines the min/max for those ranges.
	 */
	__be32 saddr_min;	/* inclusive, source IP address */
	__be32 saddr_max;	/* exclusive, source IP address */
	__be32 daddr_min;	/* inclusive, dest IP address */
	__be32 daddr_max;	/* exclusive, dest IP address */

	__u16 udp_src_min;	/* inclusive, source UDP port */
	__u16 udp_src_max;	/* exclusive, source UDP port */
	__u16 udp_dst_min;	/* inclusive, dest UDP port */
	__u16 udp_dst_max;	/* exclusive, dest UDP port */

	/* DSCP + ECN */
	__u8 tos;		/* six most significant bits of (former) IPv4 TOS are for dscp codepoint */
	__u8 traffic_class;	/* ditto for the (former) Traffic Class in IPv6 (see RFC 3260, sec. 4) */

	/* MPLS */
	unsigned nr_labels;	/* Depth of stack, 0 = no MPLS */
	__be32 labels[MAX_MPLS_LABELS];

	/* VLAN/SVLAN (802.1Q/Q-in-Q) */
	__u8  vlan_p;
	__u8  vlan_cfi;
	__u16 vlan_id;		/* 0xffff means no vlan tag */

	__u8  svlan_p;
	__u8  svlan_cfi;
	__u16 svlan_id;		/* 0xffff means no svlan tag */

	__u32 src_mac_count;	/* How many MACs to iterate through */
	__u32 dst_mac_count;	/* How many MACs to iterate through */

	unsigned char dst_mac[ETH_ALEN];
	unsigned char src_mac[ETH_ALEN];

	__u32 cur_dst_mac_offset;
	__u32 cur_src_mac_offset;
	__be32 cur_saddr;
	__be32 cur_daddr;
	__u16 cur_udp_dst;
	__u16 cur_udp_src;
	__u32 cur_pkt_size;

	__u8 hh[14];
	/* = {
	   0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB,

	   We fill in SRC address later
	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	   0x08, 0x00
	   };
	 */
	__u16 pad;		/* pad out the hh struct to an even 16 bytes */

	struct sk_buff *skb;	/* skb we are to transmit next, mainly used for when we
				 * are transmitting the same one multiple times
				 */
	struct net_device *odev;	/* The out-going device.  Note that the device should
					 * have its pg_info pointer pointing back to this
					 * device.  This will be set when the user specifies
					 * the out-going device name (not when the inject is
					 * started as it used to do.)
					 */
	struct flow_state *flows;
	unsigned cflows;	/* Concurrent flows (config) */
	unsigned lflow;		/* Flow length (config) */
	unsigned nflows;	/* accumulated flows (stats) */
	unsigned curfl;		/* current sequenced flow (state) */
#ifdef CONFIG_XFRM
	__u8 ipsmode;		/* IPSEC mode (config) */
	__u8 ipsproto;		/* IPSEC type (config) */
#endif
	char result[512];
};

struct pktgen_hdr {
	__be32 pgh_magic;
	__be32 seq_num;
	__be32 tv_sec;
	__be32 tv_usec;
};

struct pktgen_thread {
	spinlock_t if_lock;
	struct list_head if_list;	/* All devices here */
	struct list_head th_list;
	struct task_struct *tsk;
	char result[512];
	u32 max_before_softirq;	/* We'll call do_softirq to prevent starvation. */

	/* Field for thread to receive "posted" events: terminate, stop ifs etc.
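	 * (These are the T_* bits defined above; for example, writing
	 *  "rem_device_all" to the thread's /proc file sets T_REMDEVALL,
	 *  which the worker picks up on its next pass -- see
	 *  pktgen_thread_write() below.)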
	 */

	u32 control;
	int pid;
	int cpu;

	wait_queue_head_t queue;
};

#define REMOVE 1
#define FIND   0

/* This code works around the fact that do_div cannot handle two 64-bit
 * numbers, and regular 64-bit division doesn't work on x86 kernels.
 * --Ben
 */

#define PG_DIV 0

/* This was emailed to LKML by: Chris Caputo <ccaputo@alt.net>
 * Function copied/adapted/optimized from:
 *
 *  nemesis.sourceforge.net/browse/lib/static/intmath/ix86/intmath.c.html
 *
 * Copyright 1994, University of Cambridge Computer Laboratory
 * All Rights Reserved.
 *
 */
static inline s64 divremdi3(s64 x, s64 y, int type)
{
	u64 a = (x < 0) ? -x : x;
	u64 b = (y < 0) ? -y : y;
	u64 res = 0, d = 1;

	if (b > 0) {
		while (b < a) {
			b <<= 1;
			d <<= 1;
		}
	}

	do {
		if (a >= b) {
			a -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	}
	while (d);

	if (PG_DIV == type) {
		return (((x ^ y) & (1ll << 63)) == 0) ? res : -(s64) res;
	} else {
		return ((x & (1ll << 63)) == 0) ? a : -(s64) a;
	}
}

/* End of hacks to deal with 64-bit math on x86 */

/** Convert to milliseconds */
static inline __u64 tv_to_ms(const struct timeval *tv)
{
	__u64 ms = tv->tv_usec / 1000;
	ms += (__u64) tv->tv_sec * (__u64) 1000;
	return ms;
}

/** Convert to micro-seconds */
static inline __u64 tv_to_us(const struct timeval *tv)
{
	__u64 us = tv->tv_usec;
	us += (__u64) tv->tv_sec * (__u64) 1000000;
	return us;
}

static inline __u64 pg_div(__u64 n, __u32 base)
{
	__u64 tmp = n;
	do_div(tmp, base);
	/* printk("pktgen: pg_div, n: %llu  base: %d  rv: %llu\n",
	   n, base, tmp); */
	return tmp;
}

static inline __u64 pg_div64(__u64 n, __u64 base)
{
	__u64 tmp = n;
	/*
	 * How do we know if the architecture we are running on
	 * supports division with 64 bit base?
	 *
	 */
#if defined(__sparc_v9__) || defined(__powerpc64__) || defined(__alpha__) || defined(__x86_64__) || defined(__ia64__)

	do_div(tmp, base);
#else
	tmp = divremdi3(n, base, PG_DIV);
#endif
	return tmp;
}

static inline __u64 getCurMs(void)
{
	struct timeval tv;
	do_gettimeofday(&tv);
	return tv_to_ms(&tv);
}

static inline __u64 getCurUs(void)
{
	struct timeval tv;
	do_gettimeofday(&tv);
	return tv_to_us(&tv);
}

static inline __u64 tv_diff(const struct timeval *a, const struct timeval *b)
{
	return tv_to_us(a) - tv_to_us(b);
}

/* old include end */

static char version[] __initdata = VERSION;

static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
					  const char *ifname);
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
static void pktgen_run_all_threads(void);
static void pktgen_stop_all_threads_ifs(void);
static int pktgen_stop_device(struct pktgen_dev *pkt_dev);
static void pktgen_stop(struct pktgen_thread *t);
static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);

static unsigned int scan_ip6(const char *s, char ip[16]);
static unsigned int fmt_ip6(char *s, const char ip[16]);

/* Module parameters, defaults.
*/ 520 static int pg_count_d = 1000; /* 1000 pkts by default */ 521 static int pg_delay_d; 522 static int pg_clone_skb_d; 523 static int debug; 524 525 static DEFINE_MUTEX(pktgen_thread_lock); 526 static LIST_HEAD(pktgen_threads); 527 528 static struct notifier_block pktgen_notifier_block = { 529 .notifier_call = pktgen_device_event, 530 }; 531 532 /* 533 * /proc handling functions 534 * 535 */ 536 537 static int pgctrl_show(struct seq_file *seq, void *v) 538 { 539 seq_puts(seq, VERSION); 540 return 0; 541 } 542 543 static ssize_t pgctrl_write(struct file *file, const char __user * buf, 544 size_t count, loff_t * ppos) 545 { 546 int err = 0; 547 char data[128]; 548 549 if (!capable(CAP_NET_ADMIN)) { 550 err = -EPERM; 551 goto out; 552 } 553 554 if (count > sizeof(data)) 555 count = sizeof(data); 556 557 if (copy_from_user(data, buf, count)) { 558 err = -EFAULT; 559 goto out; 560 } 561 data[count - 1] = 0; /* Make string */ 562 563 if (!strcmp(data, "stop")) 564 pktgen_stop_all_threads_ifs(); 565 566 else if (!strcmp(data, "start")) 567 pktgen_run_all_threads(); 568 569 else 570 printk("pktgen: Unknown command: %s\n", data); 571 572 err = count; 573 574 out: 575 return err; 576 } 577 578 static int pgctrl_open(struct inode *inode, struct file *file) 579 { 580 return single_open(file, pgctrl_show, PDE(inode)->data); 581 } 582 583 static const struct file_operations pktgen_fops = { 584 .owner = THIS_MODULE, 585 .open = pgctrl_open, 586 .read = seq_read, 587 .llseek = seq_lseek, 588 .write = pgctrl_write, 589 .release = single_release, 590 }; 591 592 static int pktgen_if_show(struct seq_file *seq, void *v) 593 { 594 int i; 595 struct pktgen_dev *pkt_dev = seq->private; 596 __u64 sa; 597 __u64 stopped; 598 __u64 now = getCurUs(); 599 600 seq_printf(seq, 601 "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", 602 (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size, 603 pkt_dev->max_pkt_size); 604 605 seq_printf(seq, 606 " frags: %d delay: %u clone_skb: %d ifname: %s\n", 607 pkt_dev->nfrags, 608 1000 * pkt_dev->delay_us + pkt_dev->delay_ns, 609 pkt_dev->clone_skb, pkt_dev->odev->name); 610 611 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, 612 pkt_dev->lflow); 613 614 if (pkt_dev->flags & F_IPV6) { 615 char b1[128], b2[128], b3[128]; 616 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr); 617 fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr); 618 fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr); 619 seq_printf(seq, 620 " saddr: %s min_saddr: %s max_saddr: %s\n", b1, 621 b2, b3); 622 623 fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); 624 fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); 625 fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); 626 seq_printf(seq, 627 " daddr: %s min_daddr: %s max_daddr: %s\n", b1, 628 b2, b3); 629 630 } else 631 seq_printf(seq, 632 " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", 633 pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, 634 pkt_dev->src_max); 635 636 seq_puts(seq, " src_mac: "); 637 638 if (is_zero_ether_addr(pkt_dev->src_mac)) 639 for (i = 0; i < 6; i++) 640 seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], 641 i == 5 ? " " : ":"); 642 else 643 for (i = 0; i < 6; i++) 644 seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], 645 i == 5 ? " " : ":"); 646 647 seq_printf(seq, "dst_mac: "); 648 for (i = 0; i < 6; i++) 649 seq_printf(seq, "%02X%s", pkt_dev->dst_mac[i], 650 i == 5 ? 
"\n" : ":"); 651 652 seq_printf(seq, 653 " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", 654 pkt_dev->udp_src_min, pkt_dev->udp_src_max, 655 pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); 656 657 seq_printf(seq, 658 " src_mac_count: %d dst_mac_count: %d\n", 659 pkt_dev->src_mac_count, pkt_dev->dst_mac_count); 660 661 if (pkt_dev->nr_labels) { 662 unsigned i; 663 seq_printf(seq, " mpls: "); 664 for (i = 0; i < pkt_dev->nr_labels; i++) 665 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), 666 i == pkt_dev->nr_labels-1 ? "\n" : ", "); 667 } 668 669 if (pkt_dev->vlan_id != 0xffff) { 670 seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n", 671 pkt_dev->vlan_id, pkt_dev->vlan_p, pkt_dev->vlan_cfi); 672 } 673 674 if (pkt_dev->svlan_id != 0xffff) { 675 seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n", 676 pkt_dev->svlan_id, pkt_dev->svlan_p, pkt_dev->svlan_cfi); 677 } 678 679 if (pkt_dev->tos) { 680 seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos); 681 } 682 683 if (pkt_dev->traffic_class) { 684 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); 685 } 686 687 seq_printf(seq, " Flags: "); 688 689 if (pkt_dev->flags & F_IPV6) 690 seq_printf(seq, "IPV6 "); 691 692 if (pkt_dev->flags & F_IPSRC_RND) 693 seq_printf(seq, "IPSRC_RND "); 694 695 if (pkt_dev->flags & F_IPDST_RND) 696 seq_printf(seq, "IPDST_RND "); 697 698 if (pkt_dev->flags & F_TXSIZE_RND) 699 seq_printf(seq, "TXSIZE_RND "); 700 701 if (pkt_dev->flags & F_UDPSRC_RND) 702 seq_printf(seq, "UDPSRC_RND "); 703 704 if (pkt_dev->flags & F_UDPDST_RND) 705 seq_printf(seq, "UDPDST_RND "); 706 707 if (pkt_dev->flags & F_MPLS_RND) 708 seq_printf(seq, "MPLS_RND "); 709 710 if (pkt_dev->cflows) { 711 if (pkt_dev->flags & F_FLOW_SEQ) 712 seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/ 713 else 714 seq_printf(seq, "FLOW_RND "); 715 } 716 717 #ifdef CONFIG_XFRM 718 if (pkt_dev->flags & F_IPSEC_ON) 719 seq_printf(seq, "IPSEC "); 720 #endif 721 722 if (pkt_dev->flags & F_MACSRC_RND) 723 seq_printf(seq, "MACSRC_RND "); 724 725 if (pkt_dev->flags & F_MACDST_RND) 726 seq_printf(seq, "MACDST_RND "); 727 728 if (pkt_dev->flags & F_VID_RND) 729 seq_printf(seq, "VID_RND "); 730 731 if (pkt_dev->flags & F_SVID_RND) 732 seq_printf(seq, "SVID_RND "); 733 734 seq_puts(seq, "\n"); 735 736 sa = pkt_dev->started_at; 737 stopped = pkt_dev->stopped_at; 738 if (pkt_dev->running) 739 stopped = now; /* not really stopped, more like last-running-at */ 740 741 seq_printf(seq, 742 "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", 743 (unsigned long long)pkt_dev->sofar, 744 (unsigned long long)pkt_dev->errors, (unsigned long long)sa, 745 (unsigned long long)stopped, 746 (unsigned long long)pkt_dev->idle_acc); 747 748 seq_printf(seq, 749 " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", 750 pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, 751 pkt_dev->cur_src_mac_offset); 752 753 if (pkt_dev->flags & F_IPV6) { 754 char b1[128], b2[128]; 755 fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); 756 fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); 757 seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1); 758 } else 759 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n", 760 pkt_dev->cur_saddr, pkt_dev->cur_daddr); 761 762 seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n", 763 pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); 764 765 seq_printf(seq, " flows: %u\n", pkt_dev->nflows); 766 767 if (pkt_dev->result[0]) 768 seq_printf(seq, "Result: %s\n", pkt_dev->result); 769 else 770 
seq_printf(seq, "Result: Idle\n"); 771 772 return 0; 773 } 774 775 776 static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, __u32 *num) 777 { 778 int i = 0; 779 *num = 0; 780 781 for (; i < maxlen; i++) { 782 char c; 783 *num <<= 4; 784 if (get_user(c, &user_buffer[i])) 785 return -EFAULT; 786 if ((c >= '0') && (c <= '9')) 787 *num |= c - '0'; 788 else if ((c >= 'a') && (c <= 'f')) 789 *num |= c - 'a' + 10; 790 else if ((c >= 'A') && (c <= 'F')) 791 *num |= c - 'A' + 10; 792 else 793 break; 794 } 795 return i; 796 } 797 798 static int count_trail_chars(const char __user * user_buffer, 799 unsigned int maxlen) 800 { 801 int i; 802 803 for (i = 0; i < maxlen; i++) { 804 char c; 805 if (get_user(c, &user_buffer[i])) 806 return -EFAULT; 807 switch (c) { 808 case '\"': 809 case '\n': 810 case '\r': 811 case '\t': 812 case ' ': 813 case '=': 814 break; 815 default: 816 goto done; 817 } 818 } 819 done: 820 return i; 821 } 822 823 static unsigned long num_arg(const char __user * user_buffer, 824 unsigned long maxlen, unsigned long *num) 825 { 826 int i = 0; 827 *num = 0; 828 829 for (; i < maxlen; i++) { 830 char c; 831 if (get_user(c, &user_buffer[i])) 832 return -EFAULT; 833 if ((c >= '0') && (c <= '9')) { 834 *num *= 10; 835 *num += c - '0'; 836 } else 837 break; 838 } 839 return i; 840 } 841 842 static int strn_len(const char __user * user_buffer, unsigned int maxlen) 843 { 844 int i = 0; 845 846 for (; i < maxlen; i++) { 847 char c; 848 if (get_user(c, &user_buffer[i])) 849 return -EFAULT; 850 switch (c) { 851 case '\"': 852 case '\n': 853 case '\r': 854 case '\t': 855 case ' ': 856 goto done_str; 857 break; 858 default: 859 break; 860 } 861 } 862 done_str: 863 return i; 864 } 865 866 static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) 867 { 868 unsigned n = 0; 869 char c; 870 ssize_t i = 0; 871 int len; 872 873 pkt_dev->nr_labels = 0; 874 do { 875 __u32 tmp; 876 len = hex32_arg(&buffer[i], 8, &tmp); 877 if (len <= 0) 878 return len; 879 pkt_dev->labels[n] = htonl(tmp); 880 if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM) 881 pkt_dev->flags |= F_MPLS_RND; 882 i += len; 883 if (get_user(c, &buffer[i])) 884 return -EFAULT; 885 i++; 886 n++; 887 if (n >= MAX_MPLS_LABELS) 888 return -E2BIG; 889 } while (c == ','); 890 891 pkt_dev->nr_labels = n; 892 return i; 893 } 894 895 static ssize_t pktgen_if_write(struct file *file, 896 const char __user * user_buffer, size_t count, 897 loff_t * offset) 898 { 899 struct seq_file *seq = (struct seq_file *)file->private_data; 900 struct pktgen_dev *pkt_dev = seq->private; 901 int i = 0, max, len; 902 char name[16], valstr[32]; 903 unsigned long value = 0; 904 char *pg_result = NULL; 905 int tmp = 0; 906 char buf[128]; 907 908 pg_result = &(pkt_dev->result[0]); 909 910 if (count < 1) { 911 printk("pktgen: wrong command format\n"); 912 return -EINVAL; 913 } 914 915 max = count - i; 916 tmp = count_trail_chars(&user_buffer[i], max); 917 if (tmp < 0) { 918 printk("pktgen: illegal format\n"); 919 return tmp; 920 } 921 i += tmp; 922 923 /* Read variable name */ 924 925 len = strn_len(&user_buffer[i], sizeof(name) - 1); 926 if (len < 0) { 927 return len; 928 } 929 memset(name, 0, sizeof(name)); 930 if (copy_from_user(name, &user_buffer[i], len)) 931 return -EFAULT; 932 i += len; 933 934 max = count - i; 935 len = count_trail_chars(&user_buffer[i], max); 936 if (len < 0) 937 return len; 938 939 i += len; 940 941 if (debug) { 942 char tb[count + 1]; 943 if (copy_from_user(tb, user_buffer, count)) 944 return -EFAULT; 945 
tb[count] = 0; 946 printk("pktgen: %s,%lu buffer -:%s:-\n", name, 947 (unsigned long)count, tb); 948 } 949 950 if (!strcmp(name, "min_pkt_size")) { 951 len = num_arg(&user_buffer[i], 10, &value); 952 if (len < 0) { 953 return len; 954 } 955 i += len; 956 if (value < 14 + 20 + 8) 957 value = 14 + 20 + 8; 958 if (value != pkt_dev->min_pkt_size) { 959 pkt_dev->min_pkt_size = value; 960 pkt_dev->cur_pkt_size = value; 961 } 962 sprintf(pg_result, "OK: min_pkt_size=%u", 963 pkt_dev->min_pkt_size); 964 return count; 965 } 966 967 if (!strcmp(name, "max_pkt_size")) { 968 len = num_arg(&user_buffer[i], 10, &value); 969 if (len < 0) { 970 return len; 971 } 972 i += len; 973 if (value < 14 + 20 + 8) 974 value = 14 + 20 + 8; 975 if (value != pkt_dev->max_pkt_size) { 976 pkt_dev->max_pkt_size = value; 977 pkt_dev->cur_pkt_size = value; 978 } 979 sprintf(pg_result, "OK: max_pkt_size=%u", 980 pkt_dev->max_pkt_size); 981 return count; 982 } 983 984 /* Shortcut for min = max */ 985 986 if (!strcmp(name, "pkt_size")) { 987 len = num_arg(&user_buffer[i], 10, &value); 988 if (len < 0) { 989 return len; 990 } 991 i += len; 992 if (value < 14 + 20 + 8) 993 value = 14 + 20 + 8; 994 if (value != pkt_dev->min_pkt_size) { 995 pkt_dev->min_pkt_size = value; 996 pkt_dev->max_pkt_size = value; 997 pkt_dev->cur_pkt_size = value; 998 } 999 sprintf(pg_result, "OK: pkt_size=%u", pkt_dev->min_pkt_size); 1000 return count; 1001 } 1002 1003 if (!strcmp(name, "debug")) { 1004 len = num_arg(&user_buffer[i], 10, &value); 1005 if (len < 0) { 1006 return len; 1007 } 1008 i += len; 1009 debug = value; 1010 sprintf(pg_result, "OK: debug=%u", debug); 1011 return count; 1012 } 1013 1014 if (!strcmp(name, "frags")) { 1015 len = num_arg(&user_buffer[i], 10, &value); 1016 if (len < 0) { 1017 return len; 1018 } 1019 i += len; 1020 pkt_dev->nfrags = value; 1021 sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags); 1022 return count; 1023 } 1024 if (!strcmp(name, "delay")) { 1025 len = num_arg(&user_buffer[i], 10, &value); 1026 if (len < 0) { 1027 return len; 1028 } 1029 i += len; 1030 if (value == 0x7FFFFFFF) { 1031 pkt_dev->delay_us = 0x7FFFFFFF; 1032 pkt_dev->delay_ns = 0; 1033 } else { 1034 pkt_dev->delay_us = value / 1000; 1035 pkt_dev->delay_ns = value % 1000; 1036 } 1037 sprintf(pg_result, "OK: delay=%u", 1038 1000 * pkt_dev->delay_us + pkt_dev->delay_ns); 1039 return count; 1040 } 1041 if (!strcmp(name, "udp_src_min")) { 1042 len = num_arg(&user_buffer[i], 10, &value); 1043 if (len < 0) { 1044 return len; 1045 } 1046 i += len; 1047 if (value != pkt_dev->udp_src_min) { 1048 pkt_dev->udp_src_min = value; 1049 pkt_dev->cur_udp_src = value; 1050 } 1051 sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min); 1052 return count; 1053 } 1054 if (!strcmp(name, "udp_dst_min")) { 1055 len = num_arg(&user_buffer[i], 10, &value); 1056 if (len < 0) { 1057 return len; 1058 } 1059 i += len; 1060 if (value != pkt_dev->udp_dst_min) { 1061 pkt_dev->udp_dst_min = value; 1062 pkt_dev->cur_udp_dst = value; 1063 } 1064 sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min); 1065 return count; 1066 } 1067 if (!strcmp(name, "udp_src_max")) { 1068 len = num_arg(&user_buffer[i], 10, &value); 1069 if (len < 0) { 1070 return len; 1071 } 1072 i += len; 1073 if (value != pkt_dev->udp_src_max) { 1074 pkt_dev->udp_src_max = value; 1075 pkt_dev->cur_udp_src = value; 1076 } 1077 sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max); 1078 return count; 1079 } 1080 if (!strcmp(name, "udp_dst_max")) { 1081 len = num_arg(&user_buffer[i], 10, 
&value); 1082 if (len < 0) { 1083 return len; 1084 } 1085 i += len; 1086 if (value != pkt_dev->udp_dst_max) { 1087 pkt_dev->udp_dst_max = value; 1088 pkt_dev->cur_udp_dst = value; 1089 } 1090 sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max); 1091 return count; 1092 } 1093 if (!strcmp(name, "clone_skb")) { 1094 len = num_arg(&user_buffer[i], 10, &value); 1095 if (len < 0) { 1096 return len; 1097 } 1098 i += len; 1099 pkt_dev->clone_skb = value; 1100 1101 sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb); 1102 return count; 1103 } 1104 if (!strcmp(name, "count")) { 1105 len = num_arg(&user_buffer[i], 10, &value); 1106 if (len < 0) { 1107 return len; 1108 } 1109 i += len; 1110 pkt_dev->count = value; 1111 sprintf(pg_result, "OK: count=%llu", 1112 (unsigned long long)pkt_dev->count); 1113 return count; 1114 } 1115 if (!strcmp(name, "src_mac_count")) { 1116 len = num_arg(&user_buffer[i], 10, &value); 1117 if (len < 0) { 1118 return len; 1119 } 1120 i += len; 1121 if (pkt_dev->src_mac_count != value) { 1122 pkt_dev->src_mac_count = value; 1123 pkt_dev->cur_src_mac_offset = 0; 1124 } 1125 sprintf(pg_result, "OK: src_mac_count=%d", 1126 pkt_dev->src_mac_count); 1127 return count; 1128 } 1129 if (!strcmp(name, "dst_mac_count")) { 1130 len = num_arg(&user_buffer[i], 10, &value); 1131 if (len < 0) { 1132 return len; 1133 } 1134 i += len; 1135 if (pkt_dev->dst_mac_count != value) { 1136 pkt_dev->dst_mac_count = value; 1137 pkt_dev->cur_dst_mac_offset = 0; 1138 } 1139 sprintf(pg_result, "OK: dst_mac_count=%d", 1140 pkt_dev->dst_mac_count); 1141 return count; 1142 } 1143 if (!strcmp(name, "flag")) { 1144 char f[32]; 1145 memset(f, 0, 32); 1146 len = strn_len(&user_buffer[i], sizeof(f) - 1); 1147 if (len < 0) { 1148 return len; 1149 } 1150 if (copy_from_user(f, &user_buffer[i], len)) 1151 return -EFAULT; 1152 i += len; 1153 if (strcmp(f, "IPSRC_RND") == 0) 1154 pkt_dev->flags |= F_IPSRC_RND; 1155 1156 else if (strcmp(f, "!IPSRC_RND") == 0) 1157 pkt_dev->flags &= ~F_IPSRC_RND; 1158 1159 else if (strcmp(f, "TXSIZE_RND") == 0) 1160 pkt_dev->flags |= F_TXSIZE_RND; 1161 1162 else if (strcmp(f, "!TXSIZE_RND") == 0) 1163 pkt_dev->flags &= ~F_TXSIZE_RND; 1164 1165 else if (strcmp(f, "IPDST_RND") == 0) 1166 pkt_dev->flags |= F_IPDST_RND; 1167 1168 else if (strcmp(f, "!IPDST_RND") == 0) 1169 pkt_dev->flags &= ~F_IPDST_RND; 1170 1171 else if (strcmp(f, "UDPSRC_RND") == 0) 1172 pkt_dev->flags |= F_UDPSRC_RND; 1173 1174 else if (strcmp(f, "!UDPSRC_RND") == 0) 1175 pkt_dev->flags &= ~F_UDPSRC_RND; 1176 1177 else if (strcmp(f, "UDPDST_RND") == 0) 1178 pkt_dev->flags |= F_UDPDST_RND; 1179 1180 else if (strcmp(f, "!UDPDST_RND") == 0) 1181 pkt_dev->flags &= ~F_UDPDST_RND; 1182 1183 else if (strcmp(f, "MACSRC_RND") == 0) 1184 pkt_dev->flags |= F_MACSRC_RND; 1185 1186 else if (strcmp(f, "!MACSRC_RND") == 0) 1187 pkt_dev->flags &= ~F_MACSRC_RND; 1188 1189 else if (strcmp(f, "MACDST_RND") == 0) 1190 pkt_dev->flags |= F_MACDST_RND; 1191 1192 else if (strcmp(f, "!MACDST_RND") == 0) 1193 pkt_dev->flags &= ~F_MACDST_RND; 1194 1195 else if (strcmp(f, "MPLS_RND") == 0) 1196 pkt_dev->flags |= F_MPLS_RND; 1197 1198 else if (strcmp(f, "!MPLS_RND") == 0) 1199 pkt_dev->flags &= ~F_MPLS_RND; 1200 1201 else if (strcmp(f, "VID_RND") == 0) 1202 pkt_dev->flags |= F_VID_RND; 1203 1204 else if (strcmp(f, "!VID_RND") == 0) 1205 pkt_dev->flags &= ~F_VID_RND; 1206 1207 else if (strcmp(f, "SVID_RND") == 0) 1208 pkt_dev->flags |= F_SVID_RND; 1209 1210 else if (strcmp(f, "!SVID_RND") == 0) 1211 pkt_dev->flags &= ~F_SVID_RND; 
1212 1213 else if (strcmp(f, "FLOW_SEQ") == 0) 1214 pkt_dev->flags |= F_FLOW_SEQ; 1215 1216 #ifdef CONFIG_XFRM 1217 else if (strcmp(f, "IPSEC") == 0) 1218 pkt_dev->flags |= F_IPSEC_ON; 1219 #endif 1220 1221 else if (strcmp(f, "!IPV6") == 0) 1222 pkt_dev->flags &= ~F_IPV6; 1223 1224 else { 1225 sprintf(pg_result, 1226 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", 1227 f, 1228 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " 1229 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC\n"); 1230 return count; 1231 } 1232 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); 1233 return count; 1234 } 1235 if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { 1236 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); 1237 if (len < 0) { 1238 return len; 1239 } 1240 1241 if (copy_from_user(buf, &user_buffer[i], len)) 1242 return -EFAULT; 1243 buf[len] = 0; 1244 if (strcmp(buf, pkt_dev->dst_min) != 0) { 1245 memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min)); 1246 strncpy(pkt_dev->dst_min, buf, len); 1247 pkt_dev->daddr_min = in_aton(pkt_dev->dst_min); 1248 pkt_dev->cur_daddr = pkt_dev->daddr_min; 1249 } 1250 if (debug) 1251 printk("pktgen: dst_min set to: %s\n", 1252 pkt_dev->dst_min); 1253 i += len; 1254 sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min); 1255 return count; 1256 } 1257 if (!strcmp(name, "dst_max")) { 1258 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); 1259 if (len < 0) { 1260 return len; 1261 } 1262 1263 if (copy_from_user(buf, &user_buffer[i], len)) 1264 return -EFAULT; 1265 1266 buf[len] = 0; 1267 if (strcmp(buf, pkt_dev->dst_max) != 0) { 1268 memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max)); 1269 strncpy(pkt_dev->dst_max, buf, len); 1270 pkt_dev->daddr_max = in_aton(pkt_dev->dst_max); 1271 pkt_dev->cur_daddr = pkt_dev->daddr_max; 1272 } 1273 if (debug) 1274 printk("pktgen: dst_max set to: %s\n", 1275 pkt_dev->dst_max); 1276 i += len; 1277 sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max); 1278 return count; 1279 } 1280 if (!strcmp(name, "dst6")) { 1281 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1282 if (len < 0) 1283 return len; 1284 1285 pkt_dev->flags |= F_IPV6; 1286 1287 if (copy_from_user(buf, &user_buffer[i], len)) 1288 return -EFAULT; 1289 buf[len] = 0; 1290 1291 scan_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1292 fmt_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1293 1294 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr); 1295 1296 if (debug) 1297 printk("pktgen: dst6 set to: %s\n", buf); 1298 1299 i += len; 1300 sprintf(pg_result, "OK: dst6=%s", buf); 1301 return count; 1302 } 1303 if (!strcmp(name, "dst6_min")) { 1304 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1305 if (len < 0) 1306 return len; 1307 1308 pkt_dev->flags |= F_IPV6; 1309 1310 if (copy_from_user(buf, &user_buffer[i], len)) 1311 return -EFAULT; 1312 buf[len] = 0; 1313 1314 scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1315 fmt_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1316 1317 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, 1318 &pkt_dev->min_in6_daddr); 1319 if (debug) 1320 printk("pktgen: dst6_min set to: %s\n", buf); 1321 1322 i += len; 1323 sprintf(pg_result, "OK: dst6_min=%s", buf); 1324 return count; 1325 } 1326 if (!strcmp(name, "dst6_max")) { 1327 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1328 if (len < 0) 1329 return len; 1330 1331 pkt_dev->flags |= F_IPV6; 1332 1333 if (copy_from_user(buf, &user_buffer[i], len)) 1334 return -EFAULT; 1335 buf[len] = 0; 1336 1337 scan_ip6(buf, 
pkt_dev->max_in6_daddr.s6_addr); 1338 fmt_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1339 1340 if (debug) 1341 printk("pktgen: dst6_max set to: %s\n", buf); 1342 1343 i += len; 1344 sprintf(pg_result, "OK: dst6_max=%s", buf); 1345 return count; 1346 } 1347 if (!strcmp(name, "src6")) { 1348 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1349 if (len < 0) 1350 return len; 1351 1352 pkt_dev->flags |= F_IPV6; 1353 1354 if (copy_from_user(buf, &user_buffer[i], len)) 1355 return -EFAULT; 1356 buf[len] = 0; 1357 1358 scan_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1359 fmt_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1360 1361 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr); 1362 1363 if (debug) 1364 printk("pktgen: src6 set to: %s\n", buf); 1365 1366 i += len; 1367 sprintf(pg_result, "OK: src6=%s", buf); 1368 return count; 1369 } 1370 if (!strcmp(name, "src_min")) { 1371 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); 1372 if (len < 0) { 1373 return len; 1374 } 1375 if (copy_from_user(buf, &user_buffer[i], len)) 1376 return -EFAULT; 1377 buf[len] = 0; 1378 if (strcmp(buf, pkt_dev->src_min) != 0) { 1379 memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min)); 1380 strncpy(pkt_dev->src_min, buf, len); 1381 pkt_dev->saddr_min = in_aton(pkt_dev->src_min); 1382 pkt_dev->cur_saddr = pkt_dev->saddr_min; 1383 } 1384 if (debug) 1385 printk("pktgen: src_min set to: %s\n", 1386 pkt_dev->src_min); 1387 i += len; 1388 sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min); 1389 return count; 1390 } 1391 if (!strcmp(name, "src_max")) { 1392 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); 1393 if (len < 0) { 1394 return len; 1395 } 1396 if (copy_from_user(buf, &user_buffer[i], len)) 1397 return -EFAULT; 1398 buf[len] = 0; 1399 if (strcmp(buf, pkt_dev->src_max) != 0) { 1400 memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max)); 1401 strncpy(pkt_dev->src_max, buf, len); 1402 pkt_dev->saddr_max = in_aton(pkt_dev->src_max); 1403 pkt_dev->cur_saddr = pkt_dev->saddr_max; 1404 } 1405 if (debug) 1406 printk("pktgen: src_max set to: %s\n", 1407 pkt_dev->src_max); 1408 i += len; 1409 sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max); 1410 return count; 1411 } 1412 if (!strcmp(name, "dst_mac")) { 1413 char *v = valstr; 1414 unsigned char old_dmac[ETH_ALEN]; 1415 unsigned char *m = pkt_dev->dst_mac; 1416 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN); 1417 1418 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1419 if (len < 0) { 1420 return len; 1421 } 1422 memset(valstr, 0, sizeof(valstr)); 1423 if (copy_from_user(valstr, &user_buffer[i], len)) 1424 return -EFAULT; 1425 i += len; 1426 1427 for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) { 1428 if (*v >= '0' && *v <= '9') { 1429 *m *= 16; 1430 *m += *v - '0'; 1431 } 1432 if (*v >= 'A' && *v <= 'F') { 1433 *m *= 16; 1434 *m += *v - 'A' + 10; 1435 } 1436 if (*v >= 'a' && *v <= 'f') { 1437 *m *= 16; 1438 *m += *v - 'a' + 10; 1439 } 1440 if (*v == ':') { 1441 m++; 1442 *m = 0; 1443 } 1444 } 1445 1446 /* Set up Dest MAC */ 1447 if (compare_ether_addr(old_dmac, pkt_dev->dst_mac)) 1448 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN); 1449 1450 sprintf(pg_result, "OK: dstmac"); 1451 return count; 1452 } 1453 if (!strcmp(name, "src_mac")) { 1454 char *v = valstr; 1455 unsigned char *m = pkt_dev->src_mac; 1456 1457 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1458 if (len < 0) { 1459 return len; 1460 } 1461 memset(valstr, 0, sizeof(valstr)); 1462 if (copy_from_user(valstr, &user_buffer[i], len)) 1463 return -EFAULT; 1464 i += 
len; 1465 1466 for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) { 1467 if (*v >= '0' && *v <= '9') { 1468 *m *= 16; 1469 *m += *v - '0'; 1470 } 1471 if (*v >= 'A' && *v <= 'F') { 1472 *m *= 16; 1473 *m += *v - 'A' + 10; 1474 } 1475 if (*v >= 'a' && *v <= 'f') { 1476 *m *= 16; 1477 *m += *v - 'a' + 10; 1478 } 1479 if (*v == ':') { 1480 m++; 1481 *m = 0; 1482 } 1483 } 1484 1485 sprintf(pg_result, "OK: srcmac"); 1486 return count; 1487 } 1488 1489 if (!strcmp(name, "clear_counters")) { 1490 pktgen_clear_counters(pkt_dev); 1491 sprintf(pg_result, "OK: Clearing counters.\n"); 1492 return count; 1493 } 1494 1495 if (!strcmp(name, "flows")) { 1496 len = num_arg(&user_buffer[i], 10, &value); 1497 if (len < 0) { 1498 return len; 1499 } 1500 i += len; 1501 if (value > MAX_CFLOWS) 1502 value = MAX_CFLOWS; 1503 1504 pkt_dev->cflows = value; 1505 sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows); 1506 return count; 1507 } 1508 1509 if (!strcmp(name, "flowlen")) { 1510 len = num_arg(&user_buffer[i], 10, &value); 1511 if (len < 0) { 1512 return len; 1513 } 1514 i += len; 1515 pkt_dev->lflow = value; 1516 sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); 1517 return count; 1518 } 1519 1520 if (!strcmp(name, "mpls")) { 1521 unsigned n, offset; 1522 len = get_labels(&user_buffer[i], pkt_dev); 1523 if (len < 0) { return len; } 1524 i += len; 1525 offset = sprintf(pg_result, "OK: mpls="); 1526 for (n = 0; n < pkt_dev->nr_labels; n++) 1527 offset += sprintf(pg_result + offset, 1528 "%08x%s", ntohl(pkt_dev->labels[n]), 1529 n == pkt_dev->nr_labels-1 ? "" : ","); 1530 1531 if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) { 1532 pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */ 1533 pkt_dev->svlan_id = 0xffff; 1534 1535 if (debug) 1536 printk("pktgen: VLAN/SVLAN auto turned off\n"); 1537 } 1538 return count; 1539 } 1540 1541 if (!strcmp(name, "vlan_id")) { 1542 len = num_arg(&user_buffer[i], 4, &value); 1543 if (len < 0) { 1544 return len; 1545 } 1546 i += len; 1547 if (value <= 4095) { 1548 pkt_dev->vlan_id = value; /* turn on VLAN */ 1549 1550 if (debug) 1551 printk("pktgen: VLAN turned on\n"); 1552 1553 if (debug && pkt_dev->nr_labels) 1554 printk("pktgen: MPLS auto turned off\n"); 1555 1556 pkt_dev->nr_labels = 0; /* turn off MPLS */ 1557 sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id); 1558 } else { 1559 pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */ 1560 pkt_dev->svlan_id = 0xffff; 1561 1562 if (debug) 1563 printk("pktgen: VLAN/SVLAN turned off\n"); 1564 } 1565 return count; 1566 } 1567 1568 if (!strcmp(name, "vlan_p")) { 1569 len = num_arg(&user_buffer[i], 1, &value); 1570 if (len < 0) { 1571 return len; 1572 } 1573 i += len; 1574 if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) { 1575 pkt_dev->vlan_p = value; 1576 sprintf(pg_result, "OK: vlan_p=%u", pkt_dev->vlan_p); 1577 } else { 1578 sprintf(pg_result, "ERROR: vlan_p must be 0-7"); 1579 } 1580 return count; 1581 } 1582 1583 if (!strcmp(name, "vlan_cfi")) { 1584 len = num_arg(&user_buffer[i], 1, &value); 1585 if (len < 0) { 1586 return len; 1587 } 1588 i += len; 1589 if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) { 1590 pkt_dev->vlan_cfi = value; 1591 sprintf(pg_result, "OK: vlan_cfi=%u", pkt_dev->vlan_cfi); 1592 } else { 1593 sprintf(pg_result, "ERROR: vlan_cfi must be 0-1"); 1594 } 1595 return count; 1596 } 1597 1598 if (!strcmp(name, "svlan_id")) { 1599 len = num_arg(&user_buffer[i], 4, &value); 1600 if (len < 0) { 1601 return len; 1602 } 1603 i += len; 1604 if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) { 1605 
pkt_dev->svlan_id = value; /* turn on SVLAN */ 1606 1607 if (debug) 1608 printk("pktgen: SVLAN turned on\n"); 1609 1610 if (debug && pkt_dev->nr_labels) 1611 printk("pktgen: MPLS auto turned off\n"); 1612 1613 pkt_dev->nr_labels = 0; /* turn off MPLS */ 1614 sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id); 1615 } else { 1616 pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */ 1617 pkt_dev->svlan_id = 0xffff; 1618 1619 if (debug) 1620 printk("pktgen: VLAN/SVLAN turned off\n"); 1621 } 1622 return count; 1623 } 1624 1625 if (!strcmp(name, "svlan_p")) { 1626 len = num_arg(&user_buffer[i], 1, &value); 1627 if (len < 0) { 1628 return len; 1629 } 1630 i += len; 1631 if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) { 1632 pkt_dev->svlan_p = value; 1633 sprintf(pg_result, "OK: svlan_p=%u", pkt_dev->svlan_p); 1634 } else { 1635 sprintf(pg_result, "ERROR: svlan_p must be 0-7"); 1636 } 1637 return count; 1638 } 1639 1640 if (!strcmp(name, "svlan_cfi")) { 1641 len = num_arg(&user_buffer[i], 1, &value); 1642 if (len < 0) { 1643 return len; 1644 } 1645 i += len; 1646 if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) { 1647 pkt_dev->svlan_cfi = value; 1648 sprintf(pg_result, "OK: svlan_cfi=%u", pkt_dev->svlan_cfi); 1649 } else { 1650 sprintf(pg_result, "ERROR: svlan_cfi must be 0-1"); 1651 } 1652 return count; 1653 } 1654 1655 if (!strcmp(name, "tos")) { 1656 __u32 tmp_value = 0; 1657 len = hex32_arg(&user_buffer[i], 2, &tmp_value); 1658 if (len < 0) { 1659 return len; 1660 } 1661 i += len; 1662 if (len == 2) { 1663 pkt_dev->tos = tmp_value; 1664 sprintf(pg_result, "OK: tos=0x%02x", pkt_dev->tos); 1665 } else { 1666 sprintf(pg_result, "ERROR: tos must be 00-ff"); 1667 } 1668 return count; 1669 } 1670 1671 if (!strcmp(name, "traffic_class")) { 1672 __u32 tmp_value = 0; 1673 len = hex32_arg(&user_buffer[i], 2, &tmp_value); 1674 if (len < 0) { 1675 return len; 1676 } 1677 i += len; 1678 if (len == 2) { 1679 pkt_dev->traffic_class = tmp_value; 1680 sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class); 1681 } else { 1682 sprintf(pg_result, "ERROR: traffic_class must be 00-ff"); 1683 } 1684 return count; 1685 } 1686 1687 sprintf(pkt_dev->result, "No such parameter \"%s\"", name); 1688 return -EINVAL; 1689 } 1690 1691 static int pktgen_if_open(struct inode *inode, struct file *file) 1692 { 1693 return single_open(file, pktgen_if_show, PDE(inode)->data); 1694 } 1695 1696 static const struct file_operations pktgen_if_fops = { 1697 .owner = THIS_MODULE, 1698 .open = pktgen_if_open, 1699 .read = seq_read, 1700 .llseek = seq_lseek, 1701 .write = pktgen_if_write, 1702 .release = single_release, 1703 }; 1704 1705 static int pktgen_thread_show(struct seq_file *seq, void *v) 1706 { 1707 struct pktgen_thread *t = seq->private; 1708 struct pktgen_dev *pkt_dev; 1709 1710 BUG_ON(!t); 1711 1712 seq_printf(seq, "Name: %s max_before_softirq: %d\n", 1713 t->tsk->comm, t->max_before_softirq); 1714 1715 seq_printf(seq, "Running: "); 1716 1717 if_lock(t); 1718 list_for_each_entry(pkt_dev, &t->if_list, list) 1719 if (pkt_dev->running) 1720 seq_printf(seq, "%s ", pkt_dev->odev->name); 1721 1722 seq_printf(seq, "\nStopped: "); 1723 1724 list_for_each_entry(pkt_dev, &t->if_list, list) 1725 if (!pkt_dev->running) 1726 seq_printf(seq, "%s ", pkt_dev->odev->name); 1727 1728 if (t->result[0]) 1729 seq_printf(seq, "\nResult: %s\n", t->result); 1730 else 1731 seq_printf(seq, "\nResult: NA\n"); 1732 1733 if_unlock(t); 1734 1735 return 0; 1736 } 1737 1738 static ssize_t pktgen_thread_write(struct file *file, 1739 
const char __user * user_buffer, 1740 size_t count, loff_t * offset) 1741 { 1742 struct seq_file *seq = (struct seq_file *)file->private_data; 1743 struct pktgen_thread *t = seq->private; 1744 int i = 0, max, len, ret; 1745 char name[40]; 1746 char *pg_result; 1747 unsigned long value = 0; 1748 1749 if (count < 1) { 1750 // sprintf(pg_result, "Wrong command format"); 1751 return -EINVAL; 1752 } 1753 1754 max = count - i; 1755 len = count_trail_chars(&user_buffer[i], max); 1756 if (len < 0) 1757 return len; 1758 1759 i += len; 1760 1761 /* Read variable name */ 1762 1763 len = strn_len(&user_buffer[i], sizeof(name) - 1); 1764 if (len < 0) 1765 return len; 1766 1767 memset(name, 0, sizeof(name)); 1768 if (copy_from_user(name, &user_buffer[i], len)) 1769 return -EFAULT; 1770 i += len; 1771 1772 max = count - i; 1773 len = count_trail_chars(&user_buffer[i], max); 1774 if (len < 0) 1775 return len; 1776 1777 i += len; 1778 1779 if (debug) 1780 printk("pktgen: t=%s, count=%lu\n", name, (unsigned long)count); 1781 1782 if (!t) { 1783 printk("pktgen: ERROR: No thread\n"); 1784 ret = -EINVAL; 1785 goto out; 1786 } 1787 1788 pg_result = &(t->result[0]); 1789 1790 if (!strcmp(name, "add_device")) { 1791 char f[32]; 1792 memset(f, 0, 32); 1793 len = strn_len(&user_buffer[i], sizeof(f) - 1); 1794 if (len < 0) { 1795 ret = len; 1796 goto out; 1797 } 1798 if (copy_from_user(f, &user_buffer[i], len)) 1799 return -EFAULT; 1800 i += len; 1801 mutex_lock(&pktgen_thread_lock); 1802 pktgen_add_device(t, f); 1803 mutex_unlock(&pktgen_thread_lock); 1804 ret = count; 1805 sprintf(pg_result, "OK: add_device=%s", f); 1806 goto out; 1807 } 1808 1809 if (!strcmp(name, "rem_device_all")) { 1810 mutex_lock(&pktgen_thread_lock); 1811 t->control |= T_REMDEVALL; 1812 mutex_unlock(&pktgen_thread_lock); 1813 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ 1814 ret = count; 1815 sprintf(pg_result, "OK: rem_device_all"); 1816 goto out; 1817 } 1818 1819 if (!strcmp(name, "max_before_softirq")) { 1820 len = num_arg(&user_buffer[i], 10, &value); 1821 mutex_lock(&pktgen_thread_lock); 1822 t->max_before_softirq = value; 1823 mutex_unlock(&pktgen_thread_lock); 1824 ret = count; 1825 sprintf(pg_result, "OK: max_before_softirq=%lu", value); 1826 goto out; 1827 } 1828 1829 ret = -EINVAL; 1830 out: 1831 return ret; 1832 } 1833 1834 static int pktgen_thread_open(struct inode *inode, struct file *file) 1835 { 1836 return single_open(file, pktgen_thread_show, PDE(inode)->data); 1837 } 1838 1839 static const struct file_operations pktgen_thread_fops = { 1840 .owner = THIS_MODULE, 1841 .open = pktgen_thread_open, 1842 .read = seq_read, 1843 .llseek = seq_lseek, 1844 .write = pktgen_thread_write, 1845 .release = single_release, 1846 }; 1847 1848 /* Think find or remove for NN */ 1849 static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove) 1850 { 1851 struct pktgen_thread *t; 1852 struct pktgen_dev *pkt_dev = NULL; 1853 1854 list_for_each_entry(t, &pktgen_threads, th_list) { 1855 pkt_dev = pktgen_find_dev(t, ifname); 1856 if (pkt_dev) { 1857 if (remove) { 1858 if_lock(t); 1859 pkt_dev->removal_mark = 1; 1860 t->control |= T_REMDEV; 1861 if_unlock(t); 1862 } 1863 break; 1864 } 1865 } 1866 return pkt_dev; 1867 } 1868 1869 /* 1870 * mark a device for removal 1871 */ 1872 static void pktgen_mark_device(const char *ifname) 1873 { 1874 struct pktgen_dev *pkt_dev = NULL; 1875 const int max_tries = 10, msec_per_try = 125; 1876 int i = 0; 1877 1878 mutex_lock(&pktgen_thread_lock); 1879 
pr_debug("pktgen: pktgen_mark_device marking %s for removal\n", ifname); 1880 1881 while (1) { 1882 1883 pkt_dev = __pktgen_NN_threads(ifname, REMOVE); 1884 if (pkt_dev == NULL) 1885 break; /* success */ 1886 1887 mutex_unlock(&pktgen_thread_lock); 1888 pr_debug("pktgen: pktgen_mark_device waiting for %s " 1889 "to disappear....\n", ifname); 1890 schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try)); 1891 mutex_lock(&pktgen_thread_lock); 1892 1893 if (++i >= max_tries) { 1894 printk("pktgen_mark_device: timed out after waiting " 1895 "%d msec for device %s to be removed\n", 1896 msec_per_try * i, ifname); 1897 break; 1898 } 1899 1900 } 1901 1902 mutex_unlock(&pktgen_thread_lock); 1903 } 1904 1905 static void pktgen_change_name(struct net_device *dev) 1906 { 1907 struct pktgen_thread *t; 1908 1909 list_for_each_entry(t, &pktgen_threads, th_list) { 1910 struct pktgen_dev *pkt_dev; 1911 1912 list_for_each_entry(pkt_dev, &t->if_list, list) { 1913 if (pkt_dev->odev != dev) 1914 continue; 1915 1916 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); 1917 1918 pkt_dev->entry = create_proc_entry(dev->name, 0600, 1919 pg_proc_dir); 1920 if (!pkt_dev->entry) 1921 printk(KERN_ERR "pktgen: can't move proc " 1922 " entry for '%s'\n", dev->name); 1923 break; 1924 } 1925 } 1926 } 1927 1928 static int pktgen_device_event(struct notifier_block *unused, 1929 unsigned long event, void *ptr) 1930 { 1931 struct net_device *dev = ptr; 1932 1933 /* It is OK that we do not hold the group lock right now, 1934 * as we run under the RTNL lock. 1935 */ 1936 1937 switch (event) { 1938 case NETDEV_CHANGENAME: 1939 pktgen_change_name(dev); 1940 break; 1941 1942 case NETDEV_UNREGISTER: 1943 pktgen_mark_device(dev->name); 1944 break; 1945 } 1946 1947 return NOTIFY_DONE; 1948 } 1949 1950 /* Associate pktgen_dev with a device. */ 1951 1952 static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname) 1953 { 1954 struct net_device *odev; 1955 int err; 1956 1957 /* Clean old setups */ 1958 if (pkt_dev->odev) { 1959 dev_put(pkt_dev->odev); 1960 pkt_dev->odev = NULL; 1961 } 1962 1963 odev = dev_get_by_name(ifname); 1964 if (!odev) { 1965 printk("pktgen: no such netdevice: \"%s\"\n", ifname); 1966 return -ENODEV; 1967 } 1968 1969 if (odev->type != ARPHRD_ETHER) { 1970 printk("pktgen: not an ethernet device: \"%s\"\n", ifname); 1971 err = -EINVAL; 1972 } else if (!netif_running(odev)) { 1973 printk("pktgen: device is down: \"%s\"\n", ifname); 1974 err = -ENETDOWN; 1975 } else { 1976 pkt_dev->odev = odev; 1977 return 0; 1978 } 1979 1980 dev_put(odev); 1981 return err; 1982 } 1983 1984 /* Read pkt_dev from the interface and set up internal pktgen_dev 1985 * structure to have the right information to create/send packets 1986 */ 1987 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) 1988 { 1989 if (!pkt_dev->odev) { 1990 printk("pktgen: ERROR: pkt_dev->odev == NULL in setup_inject.\n"); 1991 sprintf(pkt_dev->result, 1992 "ERROR: pkt_dev->odev == NULL in setup_inject.\n"); 1993 return; 1994 } 1995 1996 /* Default to the interface's mac if not explicitly set. 
*/ 1997 1998 if (is_zero_ether_addr(pkt_dev->src_mac)) 1999 memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN); 2000 2001 /* Set up Dest MAC */ 2002 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN); 2003 2004 /* Set up pkt size */ 2005 pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size; 2006 2007 if (pkt_dev->flags & F_IPV6) { 2008 /* 2009 * Skip this automatic address setting until locks or functions 2010 * gets exported 2011 */ 2012 2013 #ifdef NOTNOW 2014 int i, set = 0, err = 1; 2015 struct inet6_dev *idev; 2016 2017 for (i = 0; i < IN6_ADDR_HSIZE; i++) 2018 if (pkt_dev->cur_in6_saddr.s6_addr[i]) { 2019 set = 1; 2020 break; 2021 } 2022 2023 if (!set) { 2024 2025 /* 2026 * Use linklevel address if unconfigured. 2027 * 2028 * use ipv6_get_lladdr if/when it's get exported 2029 */ 2030 2031 rcu_read_lock(); 2032 if ((idev = __in6_dev_get(pkt_dev->odev)) != NULL) { 2033 struct inet6_ifaddr *ifp; 2034 2035 read_lock_bh(&idev->lock); 2036 for (ifp = idev->addr_list; ifp; 2037 ifp = ifp->if_next) { 2038 if (ifp->scope == IFA_LINK 2039 && !(ifp-> 2040 flags & IFA_F_TENTATIVE)) { 2041 ipv6_addr_copy(&pkt_dev-> 2042 cur_in6_saddr, 2043 &ifp->addr); 2044 err = 0; 2045 break; 2046 } 2047 } 2048 read_unlock_bh(&idev->lock); 2049 } 2050 rcu_read_unlock(); 2051 if (err) 2052 printk("pktgen: ERROR: IPv6 link address not availble.\n"); 2053 } 2054 #endif 2055 } else { 2056 pkt_dev->saddr_min = 0; 2057 pkt_dev->saddr_max = 0; 2058 if (strlen(pkt_dev->src_min) == 0) { 2059 2060 struct in_device *in_dev; 2061 2062 rcu_read_lock(); 2063 in_dev = __in_dev_get_rcu(pkt_dev->odev); 2064 if (in_dev) { 2065 if (in_dev->ifa_list) { 2066 pkt_dev->saddr_min = 2067 in_dev->ifa_list->ifa_address; 2068 pkt_dev->saddr_max = pkt_dev->saddr_min; 2069 } 2070 } 2071 rcu_read_unlock(); 2072 } else { 2073 pkt_dev->saddr_min = in_aton(pkt_dev->src_min); 2074 pkt_dev->saddr_max = in_aton(pkt_dev->src_max); 2075 } 2076 2077 pkt_dev->daddr_min = in_aton(pkt_dev->dst_min); 2078 pkt_dev->daddr_max = in_aton(pkt_dev->dst_max); 2079 } 2080 /* Initialize current values. 
 */
2081 	pkt_dev->cur_dst_mac_offset = 0;
2082 	pkt_dev->cur_src_mac_offset = 0;
2083 	pkt_dev->cur_saddr = pkt_dev->saddr_min;
2084 	pkt_dev->cur_daddr = pkt_dev->daddr_min;
2085 	pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
2086 	pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
2087 	pkt_dev->nflows = 0;
2088 }
2089 
2090 static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us)
2091 {
2092 	__u64 start;
2093 	__u64 now;
2094 
2095 	start = now = getCurUs();
2096 	pr_debug("pktgen: sleeping for %d\n", (int)(spin_until_us - now));
2097 	while (now < spin_until_us) {
2098 		/* TODO: optimize sleeping behavior */
2099 		if (spin_until_us - now > jiffies_to_usecs(1) + 1)
2100 			schedule_timeout_interruptible(1);
2101 		else if (spin_until_us - now > 100) {
2102 			do_softirq();
2103 			if (!pkt_dev->running)
2104 				return;
2105 			if (need_resched())
2106 				schedule();
2107 		}
2108 
2109 		now = getCurUs();
2110 	}
2111 
2112 	pkt_dev->idle_acc += now - start;
2113 }
2114 
2115 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
2116 {
2117 	pkt_dev->pkt_overhead = 0;
2118 	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
2119 	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
2120 	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
2121 }
2122 
2123 static inline int f_seen(struct pktgen_dev *pkt_dev, int flow)
2124 {
2125 
2126 	if (pkt_dev->flows[flow].flags & F_INIT)
2127 		return 1;
2128 	else
2129 		return 0;
2130 }
2131 
2132 static inline int f_pick(struct pktgen_dev *pkt_dev)
2133 {
2134 	int flow = pkt_dev->curfl;
2135 
2136 	if (pkt_dev->flags & F_FLOW_SEQ) {
2137 		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
2138 			/* reset time */
2139 			pkt_dev->flows[flow].count = 0;
2140 			pkt_dev->curfl += 1;
2141 			if (pkt_dev->curfl >= pkt_dev->cflows)
2142 				pkt_dev->curfl = 0; /* reset */
2143 		}
2144 	} else {
2145 		flow = pkt_dev->curfl = random32() % pkt_dev->cflows;
2146 
2147 		if (pkt_dev->flows[flow].count > pkt_dev->lflow)
2148 			pkt_dev->flows[flow].count = 0;
2149 	}
2150 
2151 	return pkt_dev->curfl;
2152 }
2153 
2154 
2155 #ifdef CONFIG_XFRM
2156 /* If there was already an IPSEC SA, we keep it as is, else
2157  * we go look for it ...
2158 */ 2159 inline 2160 void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) 2161 { 2162 struct xfrm_state *x = pkt_dev->flows[flow].x; 2163 if (!x) { 2164 /*slow path: we dont already have xfrm_state*/ 2165 x = xfrm_stateonly_find((xfrm_address_t *)&pkt_dev->cur_daddr, 2166 (xfrm_address_t *)&pkt_dev->cur_saddr, 2167 AF_INET, 2168 pkt_dev->ipsmode, 2169 pkt_dev->ipsproto, 0); 2170 if (x) { 2171 pkt_dev->flows[flow].x = x; 2172 set_pkt_overhead(pkt_dev); 2173 pkt_dev->pkt_overhead+=x->props.header_len; 2174 } 2175 2176 } 2177 } 2178 #endif 2179 /* Increment/randomize headers according to flags and current values 2180 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst 2181 */ 2182 static void mod_cur_headers(struct pktgen_dev *pkt_dev) 2183 { 2184 __u32 imn; 2185 __u32 imx; 2186 int flow = 0; 2187 2188 if (pkt_dev->cflows) 2189 flow = f_pick(pkt_dev); 2190 2191 /* Deal with source MAC */ 2192 if (pkt_dev->src_mac_count > 1) { 2193 __u32 mc; 2194 __u32 tmp; 2195 2196 if (pkt_dev->flags & F_MACSRC_RND) 2197 mc = random32() % pkt_dev->src_mac_count; 2198 else { 2199 mc = pkt_dev->cur_src_mac_offset++; 2200 if (pkt_dev->cur_src_mac_offset > 2201 pkt_dev->src_mac_count) 2202 pkt_dev->cur_src_mac_offset = 0; 2203 } 2204 2205 tmp = pkt_dev->src_mac[5] + (mc & 0xFF); 2206 pkt_dev->hh[11] = tmp; 2207 tmp = (pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 2208 pkt_dev->hh[10] = tmp; 2209 tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 2210 pkt_dev->hh[9] = tmp; 2211 tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 2212 pkt_dev->hh[8] = tmp; 2213 tmp = (pkt_dev->src_mac[1] + (tmp >> 8)); 2214 pkt_dev->hh[7] = tmp; 2215 } 2216 2217 /* Deal with Destination MAC */ 2218 if (pkt_dev->dst_mac_count > 1) { 2219 __u32 mc; 2220 __u32 tmp; 2221 2222 if (pkt_dev->flags & F_MACDST_RND) 2223 mc = random32() % pkt_dev->dst_mac_count; 2224 2225 else { 2226 mc = pkt_dev->cur_dst_mac_offset++; 2227 if (pkt_dev->cur_dst_mac_offset > 2228 pkt_dev->dst_mac_count) { 2229 pkt_dev->cur_dst_mac_offset = 0; 2230 } 2231 } 2232 2233 tmp = pkt_dev->dst_mac[5] + (mc & 0xFF); 2234 pkt_dev->hh[5] = tmp; 2235 tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 2236 pkt_dev->hh[4] = tmp; 2237 tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 2238 pkt_dev->hh[3] = tmp; 2239 tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 2240 pkt_dev->hh[2] = tmp; 2241 tmp = (pkt_dev->dst_mac[1] + (tmp >> 8)); 2242 pkt_dev->hh[1] = tmp; 2243 } 2244 2245 if (pkt_dev->flags & F_MPLS_RND) { 2246 unsigned i; 2247 for (i = 0; i < pkt_dev->nr_labels; i++) 2248 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) 2249 pkt_dev->labels[i] = MPLS_STACK_BOTTOM | 2250 ((__force __be32)random32() & 2251 htonl(0x000fffff)); 2252 } 2253 2254 if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) { 2255 pkt_dev->vlan_id = random32() & (4096-1); 2256 } 2257 2258 if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) { 2259 pkt_dev->svlan_id = random32() & (4096 - 1); 2260 } 2261 2262 if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) { 2263 if (pkt_dev->flags & F_UDPSRC_RND) 2264 pkt_dev->cur_udp_src = random32() % 2265 (pkt_dev->udp_src_max - pkt_dev->udp_src_min) 2266 + pkt_dev->udp_src_min; 2267 2268 else { 2269 pkt_dev->cur_udp_src++; 2270 if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max) 2271 pkt_dev->cur_udp_src = pkt_dev->udp_src_min; 2272 } 2273 } 2274 2275 if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) { 2276 if (pkt_dev->flags & F_UDPDST_RND) { 2277 
			pkt_dev->cur_udp_dst = random32() %
2278 				(pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)
2279 				+ pkt_dev->udp_dst_min;
2280 		} else {
2281 			pkt_dev->cur_udp_dst++;
2282 			if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
2283 				pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
2284 		}
2285 	}
2286 
2287 	if (!(pkt_dev->flags & F_IPV6)) {
2288 
2289 		if ((imn = ntohl(pkt_dev->saddr_min)) < (imx =
2290 				ntohl(pkt_dev->
2291 					saddr_max))) {
2292 			__u32 t;
2293 			if (pkt_dev->flags & F_IPSRC_RND)
2294 				t = random32() % (imx - imn) + imn;
2295 			else {
2296 				t = ntohl(pkt_dev->cur_saddr);
2297 				t++;
2298 				if (t > imx) {
2299 					t = imn;
2300 				}
2301 			}
2302 			pkt_dev->cur_saddr = htonl(t);
2303 		}
2304 
2305 		if (pkt_dev->cflows && f_seen(pkt_dev, flow)) {
2306 			pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr;
2307 		} else {
2308 			imn = ntohl(pkt_dev->daddr_min);
2309 			imx = ntohl(pkt_dev->daddr_max);
2310 			if (imn < imx) {
2311 				__u32 t;
2312 				__be32 s;
2313 				if (pkt_dev->flags & F_IPDST_RND) {
2314 
2315 					t = random32() % (imx - imn) + imn;
2316 					s = htonl(t);
2317 
2318 					while (LOOPBACK(s) || MULTICAST(s)
2319 					       || BADCLASS(s) || ZERONET(s)
2320 					       || LOCAL_MCAST(s)) {
2321 						t = random32() % (imx - imn) + imn;
2322 						s = htonl(t);
2323 					}
2324 					pkt_dev->cur_daddr = s;
2325 				} else {
2326 					t = ntohl(pkt_dev->cur_daddr);
2327 					t++;
2328 					if (t > imx) {
2329 						t = imn;
2330 					}
2331 					pkt_dev->cur_daddr = htonl(t);
2332 				}
2333 			}
2334 			if (pkt_dev->cflows) {
2335 				pkt_dev->flows[flow].flags |= F_INIT;
2336 				pkt_dev->flows[flow].cur_daddr =
2337 					pkt_dev->cur_daddr;
2338 #ifdef CONFIG_XFRM
2339 				if (pkt_dev->flags & F_IPSEC_ON)
2340 					get_ipsec_sa(pkt_dev, flow);
2341 #endif
2342 				pkt_dev->nflows++;
2343 			}
2344 		}
2345 	} else {	/* IPv6 */
2346 
2347 		if (pkt_dev->min_in6_daddr.s6_addr32[0] == 0 &&
2348 		    pkt_dev->min_in6_daddr.s6_addr32[1] == 0 &&
2349 		    pkt_dev->min_in6_daddr.s6_addr32[2] == 0 &&
2350 		    pkt_dev->min_in6_daddr.s6_addr32[3] == 0) ;
2351 		else {
2352 			int i;
2353 
2354 			/* Only random destinations yet */
2355 
2356 			for (i = 0; i < 4; i++) {
2357 				pkt_dev->cur_in6_daddr.s6_addr32[i] =
2358 					(((__force __be32)random32() |
2359 					  pkt_dev->min_in6_daddr.s6_addr32[i]) &
2360 					 pkt_dev->max_in6_daddr.s6_addr32[i]);
2361 			}
2362 		}
2363 	}
2364 
2365 	if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
2366 		__u32 t;
2367 		if (pkt_dev->flags & F_TXSIZE_RND) {
2368 			t = random32() %
2369 				(pkt_dev->max_pkt_size - pkt_dev->min_pkt_size)
2370 				+ pkt_dev->min_pkt_size;
2371 		} else {
2372 			t = pkt_dev->cur_pkt_size + 1;
2373 			if (t > pkt_dev->max_pkt_size)
2374 				t = pkt_dev->min_pkt_size;
2375 		}
2376 		pkt_dev->cur_pkt_size = t;
2377 	}
2378 
2379 	pkt_dev->flows[flow].count++;
2380 }
2381 
2382 
2383 #ifdef CONFIG_XFRM
2384 static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2385 {
2386 	struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2387 	int err = 0;
2388 	struct iphdr *iph;
2389 
2390 	if (!x)
2391 		return 0;
2392 	/* XXX: we don't support tunnel mode for now until
2393 	 * we resolve the dst issue */
2394 	if (x->props.mode != XFRM_MODE_TRANSPORT)
2395 		return 0;
2396 
2397 	spin_lock(&x->lock);
2398 	iph = ip_hdr(skb);
2399 
2400 	err = x->mode->output(x, skb);
2401 	if (err)
2402 		goto error;
2403 	err = x->type->output(x, skb);
2404 	if (err)
2405 		goto error;
2406 
2407 	x->curlft.bytes += skb->len;
2408 	x->curlft.packets++;
2409 	/* success and failure share the single unlock below */
2410 
2411 error:
2412 	spin_unlock(&x->lock);
2413 	return err;
2414 }
2415 
2416 static inline void free_SAs(struct pktgen_dev *pkt_dev)
2417 {
2418 	if (pkt_dev->cflows) {
2419 		/* let go of the SAs if we have them */
2420 		int i = 0;
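		/*
		 * Drop the xfrm_state references cached in pkt_dev->flows[]
		 * by get_ipsec_sa()/xfrm_stateonly_find() above, so the SAs
		 * are not leaked with the flows array; the callers are
		 * pktgen_remove_device() and the pktgen_add_device() error
		 * path.
		 */
2421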
for (; i < pkt_dev->nflows; i++){ 2422 struct xfrm_state *x = pkt_dev->flows[i].x; 2423 if (x) { 2424 xfrm_state_put(x); 2425 pkt_dev->flows[i].x = NULL; 2426 } 2427 } 2428 } 2429 } 2430 2431 static inline int process_ipsec(struct pktgen_dev *pkt_dev, 2432 struct sk_buff *skb, __be16 protocol) 2433 { 2434 if (pkt_dev->flags & F_IPSEC_ON) { 2435 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x; 2436 int nhead = 0; 2437 if (x) { 2438 int ret; 2439 __u8 *eth; 2440 nhead = x->props.header_len - skb_headroom(skb); 2441 if (nhead >0) { 2442 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); 2443 if (ret < 0) { 2444 printk("Error expanding ipsec packet %d\n",ret); 2445 return 0; 2446 } 2447 } 2448 2449 /* ipsec is not expecting ll header */ 2450 skb_pull(skb, ETH_HLEN); 2451 ret = pktgen_output_ipsec(skb, pkt_dev); 2452 if (ret) { 2453 printk("Error creating ipsec packet %d\n",ret); 2454 kfree_skb(skb); 2455 return 0; 2456 } 2457 /* restore ll */ 2458 eth = (__u8 *) skb_push(skb, ETH_HLEN); 2459 memcpy(eth, pkt_dev->hh, 12); 2460 *(u16 *) & eth[12] = protocol; 2461 } 2462 } 2463 return 1; 2464 } 2465 #endif 2466 2467 static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2468 { 2469 unsigned i; 2470 for (i = 0; i < pkt_dev->nr_labels; i++) { 2471 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; 2472 } 2473 mpls--; 2474 *mpls |= MPLS_STACK_BOTTOM; 2475 } 2476 2477 static inline __be16 build_tci(unsigned int id, unsigned int cfi, 2478 unsigned int prio) 2479 { 2480 return htons(id | (cfi << 12) | (prio << 13)); 2481 } 2482 2483 static struct sk_buff *fill_packet_ipv4(struct net_device *odev, 2484 struct pktgen_dev *pkt_dev) 2485 { 2486 struct sk_buff *skb = NULL; 2487 __u8 *eth; 2488 struct udphdr *udph; 2489 int datalen, iplen; 2490 struct iphdr *iph; 2491 struct pktgen_hdr *pgh = NULL; 2492 __be16 protocol = htons(ETH_P_IP); 2493 __be32 *mpls; 2494 __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */ 2495 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ 2496 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ 2497 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ 2498 2499 2500 if (pkt_dev->nr_labels) 2501 protocol = htons(ETH_P_MPLS_UC); 2502 2503 if (pkt_dev->vlan_id != 0xffff) 2504 protocol = htons(ETH_P_8021Q); 2505 2506 /* Update any of the values, used when we're incrementing various 2507 * fields. 
2508 */ 2509 mod_cur_headers(pkt_dev); 2510 2511 datalen = (odev->hard_header_len + 16) & ~0xf; 2512 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + 2513 pkt_dev->pkt_overhead, GFP_ATOMIC); 2514 if (!skb) { 2515 sprintf(pkt_dev->result, "No memory"); 2516 return NULL; 2517 } 2518 2519 skb_reserve(skb, datalen); 2520 2521 /* Reserve for ethernet and IP header */ 2522 eth = (__u8 *) skb_push(skb, 14); 2523 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); 2524 if (pkt_dev->nr_labels) 2525 mpls_push(mpls, pkt_dev); 2526 2527 if (pkt_dev->vlan_id != 0xffff) { 2528 if (pkt_dev->svlan_id != 0xffff) { 2529 svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16)); 2530 *svlan_tci = build_tci(pkt_dev->svlan_id, 2531 pkt_dev->svlan_cfi, 2532 pkt_dev->svlan_p); 2533 svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16)); 2534 *svlan_encapsulated_proto = htons(ETH_P_8021Q); 2535 } 2536 vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16)); 2537 *vlan_tci = build_tci(pkt_dev->vlan_id, 2538 pkt_dev->vlan_cfi, 2539 pkt_dev->vlan_p); 2540 vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16)); 2541 *vlan_encapsulated_proto = htons(ETH_P_IP); 2542 } 2543 2544 skb->network_header = skb->tail; 2545 skb->transport_header = skb->network_header + sizeof(struct iphdr); 2546 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); 2547 2548 iph = ip_hdr(skb); 2549 udph = udp_hdr(skb); 2550 2551 memcpy(eth, pkt_dev->hh, 12); 2552 *(__be16 *) & eth[12] = protocol; 2553 2554 /* Eth + IPh + UDPh + mpls */ 2555 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - 2556 pkt_dev->pkt_overhead; 2557 if (datalen < sizeof(struct pktgen_hdr)) 2558 datalen = sizeof(struct pktgen_hdr); 2559 2560 udph->source = htons(pkt_dev->cur_udp_src); 2561 udph->dest = htons(pkt_dev->cur_udp_dst); 2562 udph->len = htons(datalen + 8); /* DATA + udphdr */ 2563 udph->check = 0; /* No checksum */ 2564 2565 iph->ihl = 5; 2566 iph->version = 4; 2567 iph->ttl = 32; 2568 iph->tos = pkt_dev->tos; 2569 iph->protocol = IPPROTO_UDP; /* UDP */ 2570 iph->saddr = pkt_dev->cur_saddr; 2571 iph->daddr = pkt_dev->cur_daddr; 2572 iph->frag_off = 0; 2573 iplen = 20 + 8 + datalen; 2574 iph->tot_len = htons(iplen); 2575 iph->check = 0; 2576 iph->check = ip_fast_csum((void *)iph, iph->ihl); 2577 skb->protocol = protocol; 2578 skb->mac_header = (skb->network_header - ETH_HLEN - 2579 pkt_dev->pkt_overhead); 2580 skb->dev = odev; 2581 skb->pkt_type = PACKET_HOST; 2582 2583 if (pkt_dev->nfrags <= 0) 2584 pgh = (struct pktgen_hdr *)skb_put(skb, datalen); 2585 else { 2586 int frags = pkt_dev->nfrags; 2587 int i; 2588 2589 pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8); 2590 2591 if (frags > MAX_SKB_FRAGS) 2592 frags = MAX_SKB_FRAGS; 2593 if (datalen > frags * PAGE_SIZE) { 2594 skb_put(skb, datalen - frags * PAGE_SIZE); 2595 datalen = frags * PAGE_SIZE; 2596 } 2597 2598 i = 0; 2599 while (datalen > 0) { 2600 struct page *page = alloc_pages(GFP_KERNEL, 0); 2601 skb_shinfo(skb)->frags[i].page = page; 2602 skb_shinfo(skb)->frags[i].page_offset = 0; 2603 skb_shinfo(skb)->frags[i].size = 2604 (datalen < PAGE_SIZE ? 
datalen : PAGE_SIZE); 2605 datalen -= skb_shinfo(skb)->frags[i].size; 2606 skb->len += skb_shinfo(skb)->frags[i].size; 2607 skb->data_len += skb_shinfo(skb)->frags[i].size; 2608 i++; 2609 skb_shinfo(skb)->nr_frags = i; 2610 } 2611 2612 while (i < frags) { 2613 int rem; 2614 2615 if (i == 0) 2616 break; 2617 2618 rem = skb_shinfo(skb)->frags[i - 1].size / 2; 2619 if (rem == 0) 2620 break; 2621 2622 skb_shinfo(skb)->frags[i - 1].size -= rem; 2623 2624 skb_shinfo(skb)->frags[i] = 2625 skb_shinfo(skb)->frags[i - 1]; 2626 get_page(skb_shinfo(skb)->frags[i].page); 2627 skb_shinfo(skb)->frags[i].page = 2628 skb_shinfo(skb)->frags[i - 1].page; 2629 skb_shinfo(skb)->frags[i].page_offset += 2630 skb_shinfo(skb)->frags[i - 1].size; 2631 skb_shinfo(skb)->frags[i].size = rem; 2632 i++; 2633 skb_shinfo(skb)->nr_frags = i; 2634 } 2635 } 2636 2637 /* Stamp the time, and sequence number, convert them to network byte order */ 2638 2639 if (pgh) { 2640 struct timeval timestamp; 2641 2642 pgh->pgh_magic = htonl(PKTGEN_MAGIC); 2643 pgh->seq_num = htonl(pkt_dev->seq_num); 2644 2645 do_gettimeofday(×tamp); 2646 pgh->tv_sec = htonl(timestamp.tv_sec); 2647 pgh->tv_usec = htonl(timestamp.tv_usec); 2648 } 2649 2650 #ifdef CONFIG_XFRM 2651 if (!process_ipsec(pkt_dev, skb, protocol)) 2652 return NULL; 2653 #endif 2654 2655 return skb; 2656 } 2657 2658 /* 2659 * scan_ip6, fmt_ip taken from dietlibc-0.21 2660 * Author Felix von Leitner <felix-dietlibc@fefe.de> 2661 * 2662 * Slightly modified for kernel. 2663 * Should be candidate for net/ipv4/utils.c 2664 * --ro 2665 */ 2666 2667 static unsigned int scan_ip6(const char *s, char ip[16]) 2668 { 2669 unsigned int i; 2670 unsigned int len = 0; 2671 unsigned long u; 2672 char suffix[16]; 2673 unsigned int prefixlen = 0; 2674 unsigned int suffixlen = 0; 2675 __be32 tmp; 2676 2677 for (i = 0; i < 16; i++) 2678 ip[i] = 0; 2679 2680 for (;;) { 2681 if (*s == ':') { 2682 len++; 2683 if (s[1] == ':') { /* Found "::", skip to part 2 */ 2684 s += 2; 2685 len++; 2686 break; 2687 } 2688 s++; 2689 } 2690 { 2691 char *tmp; 2692 u = simple_strtoul(s, &tmp, 16); 2693 i = tmp - s; 2694 } 2695 2696 if (!i) 2697 return 0; 2698 if (prefixlen == 12 && s[i] == '.') { 2699 2700 /* the last 4 bytes may be written as IPv4 address */ 2701 2702 tmp = in_aton(s); 2703 memcpy((struct in_addr *)(ip + 12), &tmp, sizeof(tmp)); 2704 return i + len; 2705 } 2706 ip[prefixlen++] = (u >> 8); 2707 ip[prefixlen++] = (u & 255); 2708 s += i; 2709 len += i; 2710 if (prefixlen == 16) 2711 return len; 2712 } 2713 2714 /* part 2, after "::" */ 2715 for (;;) { 2716 if (*s == ':') { 2717 if (suffixlen == 0) 2718 break; 2719 s++; 2720 len++; 2721 } else if (suffixlen != 0) 2722 break; 2723 { 2724 char *tmp; 2725 u = simple_strtol(s, &tmp, 16); 2726 i = tmp - s; 2727 } 2728 if (!i) { 2729 if (*s) 2730 len--; 2731 break; 2732 } 2733 if (suffixlen + prefixlen <= 12 && s[i] == '.') { 2734 tmp = in_aton(s); 2735 memcpy((struct in_addr *)(suffix + suffixlen), &tmp, 2736 sizeof(tmp)); 2737 suffixlen += 4; 2738 len += strlen(s); 2739 break; 2740 } 2741 suffix[suffixlen++] = (u >> 8); 2742 suffix[suffixlen++] = (u & 255); 2743 s += i; 2744 len += i; 2745 if (prefixlen + suffixlen == 16) 2746 break; 2747 } 2748 for (i = 0; i < suffixlen; i++) 2749 ip[16 - suffixlen + i] = suffix[i]; 2750 return len; 2751 } 2752 2753 static char tohex(char hexdigit) 2754 { 2755 return hexdigit > 9 ? 
hexdigit + 'a' - 10 : hexdigit + '0'; 2756 } 2757 2758 static int fmt_xlong(char *s, unsigned int i) 2759 { 2760 char *bak = s; 2761 *s = tohex((i >> 12) & 0xf); 2762 if (s != bak || *s != '0') 2763 ++s; 2764 *s = tohex((i >> 8) & 0xf); 2765 if (s != bak || *s != '0') 2766 ++s; 2767 *s = tohex((i >> 4) & 0xf); 2768 if (s != bak || *s != '0') 2769 ++s; 2770 *s = tohex(i & 0xf); 2771 return s - bak + 1; 2772 } 2773 2774 static unsigned int fmt_ip6(char *s, const char ip[16]) 2775 { 2776 unsigned int len; 2777 unsigned int i; 2778 unsigned int temp; 2779 unsigned int compressing; 2780 int j; 2781 2782 len = 0; 2783 compressing = 0; 2784 for (j = 0; j < 16; j += 2) { 2785 2786 #ifdef V4MAPPEDPREFIX 2787 if (j == 12 && !memcmp(ip, V4mappedprefix, 12)) { 2788 inet_ntoa_r(*(struct in_addr *)(ip + 12), s); 2789 temp = strlen(s); 2790 return len + temp; 2791 } 2792 #endif 2793 temp = ((unsigned long)(unsigned char)ip[j] << 8) + 2794 (unsigned long)(unsigned char)ip[j + 1]; 2795 if (temp == 0) { 2796 if (!compressing) { 2797 compressing = 1; 2798 if (j == 0) { 2799 *s++ = ':'; 2800 ++len; 2801 } 2802 } 2803 } else { 2804 if (compressing) { 2805 compressing = 0; 2806 *s++ = ':'; 2807 ++len; 2808 } 2809 i = fmt_xlong(s, temp); 2810 len += i; 2811 s += i; 2812 if (j < 14) { 2813 *s++ = ':'; 2814 ++len; 2815 } 2816 } 2817 } 2818 if (compressing) { 2819 *s++ = ':'; 2820 ++len; 2821 } 2822 *s = 0; 2823 return len; 2824 } 2825 2826 static struct sk_buff *fill_packet_ipv6(struct net_device *odev, 2827 struct pktgen_dev *pkt_dev) 2828 { 2829 struct sk_buff *skb = NULL; 2830 __u8 *eth; 2831 struct udphdr *udph; 2832 int datalen; 2833 struct ipv6hdr *iph; 2834 struct pktgen_hdr *pgh = NULL; 2835 __be16 protocol = htons(ETH_P_IPV6); 2836 __be32 *mpls; 2837 __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */ 2838 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ 2839 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ 2840 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ 2841 2842 if (pkt_dev->nr_labels) 2843 protocol = htons(ETH_P_MPLS_UC); 2844 2845 if (pkt_dev->vlan_id != 0xffff) 2846 protocol = htons(ETH_P_8021Q); 2847 2848 /* Update any of the values, used when we're incrementing various 2849 * fields. 
2850 */ 2851 mod_cur_headers(pkt_dev); 2852 2853 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + 2854 pkt_dev->pkt_overhead, GFP_ATOMIC); 2855 if (!skb) { 2856 sprintf(pkt_dev->result, "No memory"); 2857 return NULL; 2858 } 2859 2860 skb_reserve(skb, 16); 2861 2862 /* Reserve for ethernet and IP header */ 2863 eth = (__u8 *) skb_push(skb, 14); 2864 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); 2865 if (pkt_dev->nr_labels) 2866 mpls_push(mpls, pkt_dev); 2867 2868 if (pkt_dev->vlan_id != 0xffff) { 2869 if (pkt_dev->svlan_id != 0xffff) { 2870 svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16)); 2871 *svlan_tci = build_tci(pkt_dev->svlan_id, 2872 pkt_dev->svlan_cfi, 2873 pkt_dev->svlan_p); 2874 svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16)); 2875 *svlan_encapsulated_proto = htons(ETH_P_8021Q); 2876 } 2877 vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16)); 2878 *vlan_tci = build_tci(pkt_dev->vlan_id, 2879 pkt_dev->vlan_cfi, 2880 pkt_dev->vlan_p); 2881 vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16)); 2882 *vlan_encapsulated_proto = htons(ETH_P_IPV6); 2883 } 2884 2885 skb->network_header = skb->tail; 2886 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); 2887 skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); 2888 2889 iph = ipv6_hdr(skb); 2890 udph = udp_hdr(skb); 2891 2892 memcpy(eth, pkt_dev->hh, 12); 2893 *(__be16 *) & eth[12] = protocol; 2894 2895 /* Eth + IPh + UDPh + mpls */ 2896 datalen = pkt_dev->cur_pkt_size - 14 - 2897 sizeof(struct ipv6hdr) - sizeof(struct udphdr) - 2898 pkt_dev->pkt_overhead; 2899 2900 if (datalen < sizeof(struct pktgen_hdr)) { 2901 datalen = sizeof(struct pktgen_hdr); 2902 if (net_ratelimit()) 2903 printk(KERN_INFO "pktgen: increased datalen to %d\n", 2904 datalen); 2905 } 2906 2907 udph->source = htons(pkt_dev->cur_udp_src); 2908 udph->dest = htons(pkt_dev->cur_udp_dst); 2909 udph->len = htons(datalen + sizeof(struct udphdr)); 2910 udph->check = 0; /* No checksum */ 2911 2912 *(__be32 *) iph = htonl(0x60000000); /* Version + flow */ 2913 2914 if (pkt_dev->traffic_class) { 2915 /* Version + traffic class + flow (0) */ 2916 *(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); 2917 } 2918 2919 iph->hop_limit = 32; 2920 2921 iph->payload_len = htons(sizeof(struct udphdr) + datalen); 2922 iph->nexthdr = IPPROTO_UDP; 2923 2924 ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr); 2925 ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); 2926 2927 skb->mac_header = (skb->network_header - ETH_HLEN - 2928 pkt_dev->pkt_overhead); 2929 skb->protocol = protocol; 2930 skb->dev = odev; 2931 skb->pkt_type = PACKET_HOST; 2932 2933 if (pkt_dev->nfrags <= 0) 2934 pgh = (struct pktgen_hdr *)skb_put(skb, datalen); 2935 else { 2936 int frags = pkt_dev->nfrags; 2937 int i; 2938 2939 pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8); 2940 2941 if (frags > MAX_SKB_FRAGS) 2942 frags = MAX_SKB_FRAGS; 2943 if (datalen > frags * PAGE_SIZE) { 2944 skb_put(skb, datalen - frags * PAGE_SIZE); 2945 datalen = frags * PAGE_SIZE; 2946 } 2947 2948 i = 0; 2949 while (datalen > 0) { 2950 struct page *page = alloc_pages(GFP_KERNEL, 0); 2951 skb_shinfo(skb)->frags[i].page = page; 2952 skb_shinfo(skb)->frags[i].page_offset = 0; 2953 skb_shinfo(skb)->frags[i].size = 2954 (datalen < PAGE_SIZE ? 
datalen : PAGE_SIZE); 2955 datalen -= skb_shinfo(skb)->frags[i].size; 2956 skb->len += skb_shinfo(skb)->frags[i].size; 2957 skb->data_len += skb_shinfo(skb)->frags[i].size; 2958 i++; 2959 skb_shinfo(skb)->nr_frags = i; 2960 } 2961 2962 while (i < frags) { 2963 int rem; 2964 2965 if (i == 0) 2966 break; 2967 2968 rem = skb_shinfo(skb)->frags[i - 1].size / 2; 2969 if (rem == 0) 2970 break; 2971 2972 skb_shinfo(skb)->frags[i - 1].size -= rem; 2973 2974 skb_shinfo(skb)->frags[i] = 2975 skb_shinfo(skb)->frags[i - 1]; 2976 get_page(skb_shinfo(skb)->frags[i].page); 2977 skb_shinfo(skb)->frags[i].page = 2978 skb_shinfo(skb)->frags[i - 1].page; 2979 skb_shinfo(skb)->frags[i].page_offset += 2980 skb_shinfo(skb)->frags[i - 1].size; 2981 skb_shinfo(skb)->frags[i].size = rem; 2982 i++; 2983 skb_shinfo(skb)->nr_frags = i; 2984 } 2985 } 2986 2987 /* Stamp the time, and sequence number, convert them to network byte order */ 2988 /* should we update cloned packets too ? */ 2989 if (pgh) { 2990 struct timeval timestamp; 2991 2992 pgh->pgh_magic = htonl(PKTGEN_MAGIC); 2993 pgh->seq_num = htonl(pkt_dev->seq_num); 2994 2995 do_gettimeofday(×tamp); 2996 pgh->tv_sec = htonl(timestamp.tv_sec); 2997 pgh->tv_usec = htonl(timestamp.tv_usec); 2998 } 2999 /* pkt_dev->seq_num++; FF: you really mean this? */ 3000 3001 return skb; 3002 } 3003 3004 static inline struct sk_buff *fill_packet(struct net_device *odev, 3005 struct pktgen_dev *pkt_dev) 3006 { 3007 if (pkt_dev->flags & F_IPV6) 3008 return fill_packet_ipv6(odev, pkt_dev); 3009 else 3010 return fill_packet_ipv4(odev, pkt_dev); 3011 } 3012 3013 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev) 3014 { 3015 pkt_dev->seq_num = 1; 3016 pkt_dev->idle_acc = 0; 3017 pkt_dev->sofar = 0; 3018 pkt_dev->tx_bytes = 0; 3019 pkt_dev->errors = 0; 3020 } 3021 3022 /* Set up structure for sending pkts, clear counters */ 3023 3024 static void pktgen_run(struct pktgen_thread *t) 3025 { 3026 struct pktgen_dev *pkt_dev; 3027 int started = 0; 3028 3029 pr_debug("pktgen: entering pktgen_run. %p\n", t); 3030 3031 if_lock(t); 3032 list_for_each_entry(pkt_dev, &t->if_list, list) { 3033 3034 /* 3035 * setup odev and create initial packet. 3036 */ 3037 pktgen_setup_inject(pkt_dev); 3038 3039 if (pkt_dev->odev) { 3040 pktgen_clear_counters(pkt_dev); 3041 pkt_dev->running = 1; /* Cranke yeself! 
*/ 3042 pkt_dev->skb = NULL; 3043 pkt_dev->started_at = getCurUs(); 3044 pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */ 3045 pkt_dev->next_tx_ns = 0; 3046 set_pkt_overhead(pkt_dev); 3047 3048 strcpy(pkt_dev->result, "Starting"); 3049 started++; 3050 } else 3051 strcpy(pkt_dev->result, "Error starting"); 3052 } 3053 if_unlock(t); 3054 if (started) 3055 t->control &= ~(T_STOP); 3056 } 3057 3058 static void pktgen_stop_all_threads_ifs(void) 3059 { 3060 struct pktgen_thread *t; 3061 3062 pr_debug("pktgen: entering pktgen_stop_all_threads_ifs.\n"); 3063 3064 mutex_lock(&pktgen_thread_lock); 3065 3066 list_for_each_entry(t, &pktgen_threads, th_list) 3067 t->control |= T_STOP; 3068 3069 mutex_unlock(&pktgen_thread_lock); 3070 } 3071 3072 static int thread_is_running(struct pktgen_thread *t) 3073 { 3074 struct pktgen_dev *pkt_dev; 3075 int res = 0; 3076 3077 list_for_each_entry(pkt_dev, &t->if_list, list) 3078 if (pkt_dev->running) { 3079 res = 1; 3080 break; 3081 } 3082 return res; 3083 } 3084 3085 static int pktgen_wait_thread_run(struct pktgen_thread *t) 3086 { 3087 if_lock(t); 3088 3089 while (thread_is_running(t)) { 3090 3091 if_unlock(t); 3092 3093 msleep_interruptible(100); 3094 3095 if (signal_pending(current)) 3096 goto signal; 3097 if_lock(t); 3098 } 3099 if_unlock(t); 3100 return 1; 3101 signal: 3102 return 0; 3103 } 3104 3105 static int pktgen_wait_all_threads_run(void) 3106 { 3107 struct pktgen_thread *t; 3108 int sig = 1; 3109 3110 mutex_lock(&pktgen_thread_lock); 3111 3112 list_for_each_entry(t, &pktgen_threads, th_list) { 3113 sig = pktgen_wait_thread_run(t); 3114 if (sig == 0) 3115 break; 3116 } 3117 3118 if (sig == 0) 3119 list_for_each_entry(t, &pktgen_threads, th_list) 3120 t->control |= (T_STOP); 3121 3122 mutex_unlock(&pktgen_thread_lock); 3123 return sig; 3124 } 3125 3126 static void pktgen_run_all_threads(void) 3127 { 3128 struct pktgen_thread *t; 3129 3130 pr_debug("pktgen: entering pktgen_run_all_threads.\n"); 3131 3132 mutex_lock(&pktgen_thread_lock); 3133 3134 list_for_each_entry(t, &pktgen_threads, th_list) 3135 t->control |= (T_RUN); 3136 3137 mutex_unlock(&pktgen_thread_lock); 3138 3139 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ 3140 3141 pktgen_wait_all_threads_run(); 3142 } 3143 3144 static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) 3145 { 3146 __u64 total_us, bps, mbps, pps, idle; 3147 char *p = pkt_dev->result; 3148 3149 total_us = pkt_dev->stopped_at - pkt_dev->started_at; 3150 3151 idle = pkt_dev->idle_acc; 3152 3153 p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", 3154 (unsigned long long)total_us, 3155 (unsigned long long)(total_us - idle), 3156 (unsigned long long)idle, 3157 (unsigned long long)pkt_dev->sofar, 3158 pkt_dev->cur_pkt_size, nr_frags); 3159 3160 pps = pkt_dev->sofar * USEC_PER_SEC; 3161 3162 while ((total_us >> 32) != 0) { 3163 pps >>= 1; 3164 total_us >>= 1; 3165 } 3166 3167 do_div(pps, total_us); 3168 3169 bps = pps * 8 * pkt_dev->cur_pkt_size; 3170 3171 mbps = bps; 3172 do_div(mbps, 1000000); 3173 p += sprintf(p, " %llupps %lluMb/sec (%llubps) errors: %llu", 3174 (unsigned long long)pps, 3175 (unsigned long long)mbps, 3176 (unsigned long long)bps, 3177 (unsigned long long)pkt_dev->errors); 3178 } 3179 3180 /* Set stopped-at timer, remove from running list, do counters & statistics */ 3181 3182 static int pktgen_stop_device(struct pktgen_dev *pkt_dev) 3183 { 3184 int nr_frags = pkt_dev->skb ? 
skb_shinfo(pkt_dev->skb)->nr_frags : -1; 3185 3186 if (!pkt_dev->running) { 3187 printk("pktgen: interface: %s is already stopped\n", 3188 pkt_dev->odev->name); 3189 return -EINVAL; 3190 } 3191 3192 pkt_dev->stopped_at = getCurUs(); 3193 pkt_dev->running = 0; 3194 3195 show_results(pkt_dev, nr_frags); 3196 3197 return 0; 3198 } 3199 3200 static struct pktgen_dev *next_to_run(struct pktgen_thread *t) 3201 { 3202 struct pktgen_dev *pkt_dev, *best = NULL; 3203 3204 if_lock(t); 3205 3206 list_for_each_entry(pkt_dev, &t->if_list, list) { 3207 if (!pkt_dev->running) 3208 continue; 3209 if (best == NULL) 3210 best = pkt_dev; 3211 else if (pkt_dev->next_tx_us < best->next_tx_us) 3212 best = pkt_dev; 3213 } 3214 if_unlock(t); 3215 return best; 3216 } 3217 3218 static void pktgen_stop(struct pktgen_thread *t) 3219 { 3220 struct pktgen_dev *pkt_dev; 3221 3222 pr_debug("pktgen: entering pktgen_stop\n"); 3223 3224 if_lock(t); 3225 3226 list_for_each_entry(pkt_dev, &t->if_list, list) { 3227 pktgen_stop_device(pkt_dev); 3228 if (pkt_dev->skb) 3229 kfree_skb(pkt_dev->skb); 3230 3231 pkt_dev->skb = NULL; 3232 } 3233 3234 if_unlock(t); 3235 } 3236 3237 /* 3238 * one of our devices needs to be removed - find it 3239 * and remove it 3240 */ 3241 static void pktgen_rem_one_if(struct pktgen_thread *t) 3242 { 3243 struct list_head *q, *n; 3244 struct pktgen_dev *cur; 3245 3246 pr_debug("pktgen: entering pktgen_rem_one_if\n"); 3247 3248 if_lock(t); 3249 3250 list_for_each_safe(q, n, &t->if_list) { 3251 cur = list_entry(q, struct pktgen_dev, list); 3252 3253 if (!cur->removal_mark) 3254 continue; 3255 3256 if (cur->skb) 3257 kfree_skb(cur->skb); 3258 cur->skb = NULL; 3259 3260 pktgen_remove_device(t, cur); 3261 3262 break; 3263 } 3264 3265 if_unlock(t); 3266 } 3267 3268 static void pktgen_rem_all_ifs(struct pktgen_thread *t) 3269 { 3270 struct list_head *q, *n; 3271 struct pktgen_dev *cur; 3272 3273 /* Remove all devices, free mem */ 3274 3275 pr_debug("pktgen: entering pktgen_rem_all_ifs\n"); 3276 if_lock(t); 3277 3278 list_for_each_safe(q, n, &t->if_list) { 3279 cur = list_entry(q, struct pktgen_dev, list); 3280 3281 if (cur->skb) 3282 kfree_skb(cur->skb); 3283 cur->skb = NULL; 3284 3285 pktgen_remove_device(t, cur); 3286 } 3287 3288 if_unlock(t); 3289 } 3290 3291 static void pktgen_rem_thread(struct pktgen_thread *t) 3292 { 3293 /* Remove from the thread list */ 3294 3295 remove_proc_entry(t->tsk->comm, pg_proc_dir); 3296 3297 mutex_lock(&pktgen_thread_lock); 3298 3299 list_del(&t->th_list); 3300 3301 mutex_unlock(&pktgen_thread_lock); 3302 } 3303 3304 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) 3305 { 3306 struct net_device *odev = NULL; 3307 __u64 idle_start = 0; 3308 int ret; 3309 3310 odev = pkt_dev->odev; 3311 3312 if (pkt_dev->delay_us || pkt_dev->delay_ns) { 3313 u64 now; 3314 3315 now = getCurUs(); 3316 if (now < pkt_dev->next_tx_us) 3317 spin(pkt_dev, pkt_dev->next_tx_us); 3318 3319 /* This is max DELAY, this has special meaning of 3320 * "never transmit" 3321 */ 3322 if (pkt_dev->delay_us == 0x7FFFFFFF) { 3323 pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us; 3324 pkt_dev->next_tx_ns = pkt_dev->delay_ns; 3325 goto out; 3326 } 3327 } 3328 3329 if ((netif_queue_stopped(odev) || 3330 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) || 3331 need_resched()) { 3332 idle_start = getCurUs(); 3333 3334 if (!netif_running(odev)) { 3335 pktgen_stop_device(pkt_dev); 3336 if (pkt_dev->skb) 3337 kfree_skb(pkt_dev->skb); 3338 pkt_dev->skb = NULL; 3339 goto out; 3340 } 3341 if 
(need_resched()) 3342 schedule(); 3343 3344 pkt_dev->idle_acc += getCurUs() - idle_start; 3345 3346 if (netif_queue_stopped(odev) || 3347 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) { 3348 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 3349 pkt_dev->next_tx_ns = 0; 3350 goto out; /* Try the next interface */ 3351 } 3352 } 3353 3354 if (pkt_dev->last_ok || !pkt_dev->skb) { 3355 if ((++pkt_dev->clone_count >= pkt_dev->clone_skb) 3356 || (!pkt_dev->skb)) { 3357 /* build a new pkt */ 3358 if (pkt_dev->skb) 3359 kfree_skb(pkt_dev->skb); 3360 3361 pkt_dev->skb = fill_packet(odev, pkt_dev); 3362 if (pkt_dev->skb == NULL) { 3363 printk("pktgen: ERROR: couldn't allocate skb in fill_packet.\n"); 3364 schedule(); 3365 pkt_dev->clone_count--; /* back out increment, OOM */ 3366 goto out; 3367 } 3368 pkt_dev->allocated_skbs++; 3369 pkt_dev->clone_count = 0; /* reset counter */ 3370 } 3371 } 3372 3373 netif_tx_lock_bh(odev); 3374 if (!netif_queue_stopped(odev) && 3375 !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) { 3376 3377 atomic_inc(&(pkt_dev->skb->users)); 3378 retry_now: 3379 ret = odev->hard_start_xmit(pkt_dev->skb, odev); 3380 if (likely(ret == NETDEV_TX_OK)) { 3381 pkt_dev->last_ok = 1; 3382 pkt_dev->sofar++; 3383 pkt_dev->seq_num++; 3384 pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; 3385 3386 } else if (ret == NETDEV_TX_LOCKED 3387 && (odev->features & NETIF_F_LLTX)) { 3388 cpu_relax(); 3389 goto retry_now; 3390 } else { /* Retry it next time */ 3391 3392 atomic_dec(&(pkt_dev->skb->users)); 3393 3394 if (debug && net_ratelimit()) 3395 printk(KERN_INFO "pktgen: Hard xmit error\n"); 3396 3397 pkt_dev->errors++; 3398 pkt_dev->last_ok = 0; 3399 } 3400 3401 pkt_dev->next_tx_us = getCurUs(); 3402 pkt_dev->next_tx_ns = 0; 3403 3404 pkt_dev->next_tx_us += pkt_dev->delay_us; 3405 pkt_dev->next_tx_ns += pkt_dev->delay_ns; 3406 3407 if (pkt_dev->next_tx_ns > 1000) { 3408 pkt_dev->next_tx_us++; 3409 pkt_dev->next_tx_ns -= 1000; 3410 } 3411 } 3412 3413 else { /* Retry it next time */ 3414 pkt_dev->last_ok = 0; 3415 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 3416 pkt_dev->next_tx_ns = 0; 3417 } 3418 3419 netif_tx_unlock_bh(odev); 3420 3421 /* If pkt_dev->count is zero, then run forever */ 3422 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 3423 if (atomic_read(&(pkt_dev->skb->users)) != 1) { 3424 idle_start = getCurUs(); 3425 while (atomic_read(&(pkt_dev->skb->users)) != 1) { 3426 if (signal_pending(current)) { 3427 break; 3428 } 3429 schedule(); 3430 } 3431 pkt_dev->idle_acc += getCurUs() - idle_start; 3432 } 3433 3434 /* Done with this */ 3435 pktgen_stop_device(pkt_dev); 3436 if (pkt_dev->skb) 3437 kfree_skb(pkt_dev->skb); 3438 pkt_dev->skb = NULL; 3439 } 3440 out:; 3441 } 3442 3443 /* 3444 * Main loop of the thread goes here 3445 */ 3446 3447 static int pktgen_thread_worker(void *arg) 3448 { 3449 DEFINE_WAIT(wait); 3450 struct pktgen_thread *t = arg; 3451 struct pktgen_dev *pkt_dev = NULL; 3452 int cpu = t->cpu; 3453 u32 max_before_softirq; 3454 u32 tx_since_softirq = 0; 3455 3456 BUG_ON(smp_processor_id() != cpu); 3457 3458 init_waitqueue_head(&t->queue); 3459 3460 t->pid = current->pid; 3461 3462 pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid); 3463 3464 max_before_softirq = t->max_before_softirq; 3465 3466 set_current_state(TASK_INTERRUPTIBLE); 3467 3468 set_freezable(); 3469 3470 while (!kthread_should_stop()) { 3471 pkt_dev = next_to_run(t); 3472 3473 if (!pkt_dev && 3474 (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV)) 3475 
== 0) { 3476 prepare_to_wait(&(t->queue), &wait, 3477 TASK_INTERRUPTIBLE); 3478 schedule_timeout(HZ / 10); 3479 finish_wait(&(t->queue), &wait); 3480 } 3481 3482 __set_current_state(TASK_RUNNING); 3483 3484 if (pkt_dev) { 3485 3486 pktgen_xmit(pkt_dev); 3487 3488 /* 3489 * We like to stay RUNNING but must also give 3490 * others fair share. 3491 */ 3492 3493 tx_since_softirq += pkt_dev->last_ok; 3494 3495 if (tx_since_softirq > max_before_softirq) { 3496 if (local_softirq_pending()) 3497 do_softirq(); 3498 tx_since_softirq = 0; 3499 } 3500 } 3501 3502 if (t->control & T_STOP) { 3503 pktgen_stop(t); 3504 t->control &= ~(T_STOP); 3505 } 3506 3507 if (t->control & T_RUN) { 3508 pktgen_run(t); 3509 t->control &= ~(T_RUN); 3510 } 3511 3512 if (t->control & T_REMDEVALL) { 3513 pktgen_rem_all_ifs(t); 3514 t->control &= ~(T_REMDEVALL); 3515 } 3516 3517 if (t->control & T_REMDEV) { 3518 pktgen_rem_one_if(t); 3519 t->control &= ~(T_REMDEV); 3520 } 3521 3522 try_to_freeze(); 3523 3524 set_current_state(TASK_INTERRUPTIBLE); 3525 } 3526 3527 pr_debug("pktgen: %s stopping all device\n", t->tsk->comm); 3528 pktgen_stop(t); 3529 3530 pr_debug("pktgen: %s removing all device\n", t->tsk->comm); 3531 pktgen_rem_all_ifs(t); 3532 3533 pr_debug("pktgen: %s removing thread.\n", t->tsk->comm); 3534 pktgen_rem_thread(t); 3535 3536 return 0; 3537 } 3538 3539 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 3540 const char *ifname) 3541 { 3542 struct pktgen_dev *p, *pkt_dev = NULL; 3543 if_lock(t); 3544 3545 list_for_each_entry(p, &t->if_list, list) 3546 if (strncmp(p->odev->name, ifname, IFNAMSIZ) == 0) { 3547 pkt_dev = p; 3548 break; 3549 } 3550 3551 if_unlock(t); 3552 pr_debug("pktgen: find_dev(%s) returning %p\n", ifname, pkt_dev); 3553 return pkt_dev; 3554 } 3555 3556 /* 3557 * Adds a dev at front of if_list. 
3558 */ 3559 3560 static int add_dev_to_thread(struct pktgen_thread *t, 3561 struct pktgen_dev *pkt_dev) 3562 { 3563 int rv = 0; 3564 3565 if_lock(t); 3566 3567 if (pkt_dev->pg_thread) { 3568 printk("pktgen: ERROR: already assigned to a thread.\n"); 3569 rv = -EBUSY; 3570 goto out; 3571 } 3572 3573 list_add(&pkt_dev->list, &t->if_list); 3574 pkt_dev->pg_thread = t; 3575 pkt_dev->running = 0; 3576 3577 out: 3578 if_unlock(t); 3579 return rv; 3580 } 3581 3582 /* Called under thread lock */ 3583 3584 static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) 3585 { 3586 struct pktgen_dev *pkt_dev; 3587 int err; 3588 3589 /* We don't allow a device to be on several threads */ 3590 3591 pkt_dev = __pktgen_NN_threads(ifname, FIND); 3592 if (pkt_dev) { 3593 printk("pktgen: ERROR: interface already used.\n"); 3594 return -EBUSY; 3595 } 3596 3597 pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL); 3598 if (!pkt_dev) 3599 return -ENOMEM; 3600 3601 pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state)); 3602 if (pkt_dev->flows == NULL) { 3603 kfree(pkt_dev); 3604 return -ENOMEM; 3605 } 3606 memset(pkt_dev->flows, 0, MAX_CFLOWS * sizeof(struct flow_state)); 3607 3608 pkt_dev->removal_mark = 0; 3609 pkt_dev->min_pkt_size = ETH_ZLEN; 3610 pkt_dev->max_pkt_size = ETH_ZLEN; 3611 pkt_dev->nfrags = 0; 3612 pkt_dev->clone_skb = pg_clone_skb_d; 3613 pkt_dev->delay_us = pg_delay_d / 1000; 3614 pkt_dev->delay_ns = pg_delay_d % 1000; 3615 pkt_dev->count = pg_count_d; 3616 pkt_dev->sofar = 0; 3617 pkt_dev->udp_src_min = 9; /* sink port */ 3618 pkt_dev->udp_src_max = 9; 3619 pkt_dev->udp_dst_min = 9; 3620 pkt_dev->udp_dst_max = 9; 3621 3622 pkt_dev->vlan_p = 0; 3623 pkt_dev->vlan_cfi = 0; 3624 pkt_dev->vlan_id = 0xffff; 3625 pkt_dev->svlan_p = 0; 3626 pkt_dev->svlan_cfi = 0; 3627 pkt_dev->svlan_id = 0xffff; 3628 3629 err = pktgen_setup_dev(pkt_dev, ifname); 3630 if (err) 3631 goto out1; 3632 3633 pkt_dev->entry = create_proc_entry(ifname, 0600, pg_proc_dir); 3634 if (!pkt_dev->entry) { 3635 printk("pktgen: cannot create %s/%s procfs entry.\n", 3636 PG_PROC_DIR, ifname); 3637 err = -EINVAL; 3638 goto out2; 3639 } 3640 pkt_dev->entry->proc_fops = &pktgen_if_fops; 3641 pkt_dev->entry->data = pkt_dev; 3642 #ifdef CONFIG_XFRM 3643 pkt_dev->ipsmode = XFRM_MODE_TRANSPORT; 3644 pkt_dev->ipsproto = IPPROTO_ESP; 3645 #endif 3646 3647 return add_dev_to_thread(t, pkt_dev); 3648 out2: 3649 dev_put(pkt_dev->odev); 3650 out1: 3651 #ifdef CONFIG_XFRM 3652 free_SAs(pkt_dev); 3653 #endif 3654 if (pkt_dev->flows) 3655 vfree(pkt_dev->flows); 3656 kfree(pkt_dev); 3657 return err; 3658 } 3659 3660 static int __init pktgen_create_thread(int cpu) 3661 { 3662 struct pktgen_thread *t; 3663 struct proc_dir_entry *pe; 3664 struct task_struct *p; 3665 3666 t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL); 3667 if (!t) { 3668 printk("pktgen: ERROR: out of memory, can't create new thread.\n"); 3669 return -ENOMEM; 3670 } 3671 3672 spin_lock_init(&t->if_lock); 3673 t->cpu = cpu; 3674 3675 INIT_LIST_HEAD(&t->if_list); 3676 3677 list_add_tail(&t->th_list, &pktgen_threads); 3678 3679 p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu); 3680 if (IS_ERR(p)) { 3681 printk("pktgen: kernel_thread() failed for cpu %d\n", t->cpu); 3682 list_del(&t->th_list); 3683 kfree(t); 3684 return PTR_ERR(p); 3685 } 3686 kthread_bind(p, cpu); 3687 t->tsk = p; 3688 3689 pe = create_proc_entry(t->tsk->comm, 0600, pg_proc_dir); 3690 if (!pe) { 3691 printk("pktgen: cannot create %s/%s procfs entry.\n", 3692 PG_PROC_DIR, 
t->tsk->comm); 3693 kthread_stop(p); 3694 list_del(&t->th_list); 3695 kfree(t); 3696 return -EINVAL; 3697 } 3698 3699 pe->proc_fops = &pktgen_thread_fops; 3700 pe->data = t; 3701 3702 wake_up_process(p); 3703 3704 return 0; 3705 } 3706 3707 /* 3708 * Removes a device from the thread if_list. 3709 */ 3710 static void _rem_dev_from_if_list(struct pktgen_thread *t, 3711 struct pktgen_dev *pkt_dev) 3712 { 3713 struct list_head *q, *n; 3714 struct pktgen_dev *p; 3715 3716 list_for_each_safe(q, n, &t->if_list) { 3717 p = list_entry(q, struct pktgen_dev, list); 3718 if (p == pkt_dev) 3719 list_del(&p->list); 3720 } 3721 } 3722 3723 static int pktgen_remove_device(struct pktgen_thread *t, 3724 struct pktgen_dev *pkt_dev) 3725 { 3726 3727 pr_debug("pktgen: remove_device pkt_dev=%p\n", pkt_dev); 3728 3729 if (pkt_dev->running) { 3730 printk("pktgen:WARNING: trying to remove a running interface, stopping it now.\n"); 3731 pktgen_stop_device(pkt_dev); 3732 } 3733 3734 /* Dis-associate from the interface */ 3735 3736 if (pkt_dev->odev) { 3737 dev_put(pkt_dev->odev); 3738 pkt_dev->odev = NULL; 3739 } 3740 3741 /* And update the thread if_list */ 3742 3743 _rem_dev_from_if_list(t, pkt_dev); 3744 3745 if (pkt_dev->entry) 3746 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); 3747 3748 #ifdef CONFIG_XFRM 3749 free_SAs(pkt_dev); 3750 #endif 3751 if (pkt_dev->flows) 3752 vfree(pkt_dev->flows); 3753 kfree(pkt_dev); 3754 return 0; 3755 } 3756 3757 static int __init pg_init(void) 3758 { 3759 int cpu; 3760 struct proc_dir_entry *pe; 3761 3762 printk(version); 3763 3764 pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net); 3765 if (!pg_proc_dir) 3766 return -ENODEV; 3767 pg_proc_dir->owner = THIS_MODULE; 3768 3769 pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir); 3770 if (pe == NULL) { 3771 printk("pktgen: ERROR: cannot create %s procfs entry.\n", 3772 PGCTRL); 3773 proc_net_remove(PG_PROC_DIR); 3774 return -EINVAL; 3775 } 3776 3777 pe->proc_fops = &pktgen_fops; 3778 pe->data = NULL; 3779 3780 /* Register us to receive netdevice events */ 3781 register_netdevice_notifier(&pktgen_notifier_block); 3782 3783 for_each_online_cpu(cpu) { 3784 int err; 3785 3786 err = pktgen_create_thread(cpu); 3787 if (err) 3788 printk("pktgen: WARNING: Cannot create thread for cpu %d (%d)\n", 3789 cpu, err); 3790 } 3791 3792 if (list_empty(&pktgen_threads)) { 3793 printk("pktgen: ERROR: Initialization failed for all threads\n"); 3794 unregister_netdevice_notifier(&pktgen_notifier_block); 3795 remove_proc_entry(PGCTRL, pg_proc_dir); 3796 proc_net_remove(PG_PROC_DIR); 3797 return -ENODEV; 3798 } 3799 3800 return 0; 3801 } 3802 3803 static void __exit pg_cleanup(void) 3804 { 3805 struct pktgen_thread *t; 3806 struct list_head *q, *n; 3807 wait_queue_head_t queue; 3808 init_waitqueue_head(&queue); 3809 3810 /* Stop all interfaces & threads */ 3811 3812 list_for_each_safe(q, n, &pktgen_threads) { 3813 t = list_entry(q, struct pktgen_thread, th_list); 3814 kthread_stop(t->tsk); 3815 kfree(t); 3816 } 3817 3818 /* Un-register us from receiving netdevice events */ 3819 unregister_netdevice_notifier(&pktgen_notifier_block); 3820 3821 /* Clean up proc file system */ 3822 remove_proc_entry(PGCTRL, pg_proc_dir); 3823 proc_net_remove(PG_PROC_DIR); 3824 } 3825 3826 module_init(pg_init); 3827 module_exit(pg_cleanup); 3828 3829 MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se"); 3830 MODULE_DESCRIPTION("Packet Generator tool"); 3831 MODULE_LICENSE("GPL"); 3832 module_param(pg_count_d, int, 0); 3833 module_param(pg_delay_d, int, 0); 3834 
module_param(pg_clone_skb_d, int, 0); 3835 module_param(debug, int, 0); 3836
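/*
 * Usage sketch (user space, not compiled into this module): a minimal
 * C helper showing how the /proc/net/pktgen files created above are
 * typically driven.  The command strings ("add_device", "count",
 * "pkt_size", "dst", "dst_mac", "start", ...) are parsed by the /proc
 * write handlers earlier in this file and documented in
 * Documentation/networking/pktgen.txt; the thread name kpktgend_0, the
 * interface eth0 and all of the values below are only examples, so
 * adjust them to the local setup.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	// Write one pktgen command to a /proc/net/pktgen file.
 *	static void pgset(const char *path, const char *cmd)
 *	{
 *		FILE *f = fopen(path, "w");
 *
 *		if (!f) {
 *			perror(path);
 *			exit(EXIT_FAILURE);
 *		}
 *		fprintf(f, "%s\n", cmd);
 *		fclose(f);
 *	}
 *
 *	int main(void)
 *	{
 *		// Bind eth0 to the worker thread created for CPU 0.
 *		pgset("/proc/net/pktgen/kpktgend_0", "rem_device_all");
 *		pgset("/proc/net/pktgen/kpktgend_0", "add_device eth0");
 *
 *		// Configure the interface.
 *		pgset("/proc/net/pktgen/eth0", "count 100000");
 *		pgset("/proc/net/pktgen/eth0", "clone_skb 100");
 *		pgset("/proc/net/pktgen/eth0", "pkt_size 60");
 *		pgset("/proc/net/pktgen/eth0", "delay 0");
 *		pgset("/proc/net/pktgen/eth0", "dst 10.10.11.2");
 *		pgset("/proc/net/pktgen/eth0", "dst_mac 00:04:23:08:91:dc");
 *
 *		// "start" does not return until every running device has
 *		// sent its count (pktgen_run_all_threads() waits via
 *		// pktgen_wait_all_threads_run()).
 *		pgset("/proc/net/pktgen/pgctrl", "start");
 *
 *		// Per-interface results ("OK: ... pps ...") can then be
 *		// read back from /proc/net/pktgen/eth0.
 *		return 0;
 *	}
 */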