1 /* 2 * Authors: 3 * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se> 4 * Uppsala University and 5 * Swedish University of Agricultural Sciences 6 * 7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 8 * Ben Greear <greearb@candelatech.com> 9 * Jens Låås <jens.laas@data.slu.se> 10 * 11 * This program is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU General Public License 13 * as published by the Free Software Foundation; either version 14 * 2 of the License, or (at your option) any later version. 15 * 16 * 17 * A tool for loading the network with preconfigurated packets. 18 * The tool is implemented as a linux module. Parameters are output 19 * device, delay (to hard_xmit), number of packets, and whether 20 * to use multiple SKBs or just the same one. 21 * pktgen uses the installed interface's output routine. 22 * 23 * Additional hacking by: 24 * 25 * Jens.Laas@data.slu.se 26 * Improved by ANK. 010120. 27 * Improved by ANK even more. 010212. 28 * MAC address typo fixed. 010417 --ro 29 * Integrated. 020301 --DaveM 30 * Added multiskb option 020301 --DaveM 31 * Scaling of results. 020417--sigurdur@linpro.no 32 * Significant re-work of the module: 33 * * Convert to threaded model to more efficiently be able to transmit 34 * and receive on multiple interfaces at once. 35 * * Converted many counters to __u64 to allow longer runs. 36 * * Allow configuration of ranges, like min/max IP address, MACs, 37 * and UDP-ports, for both source and destination, and can 38 * set to use a random distribution or sequentially walk the range. 39 * * Can now change most values after starting. 40 * * Place 12-byte packet in UDP payload with magic number, 41 * sequence number, and timestamp. 42 * * Add receiver code that detects dropped pkts, re-ordered pkts, and 43 * latencies (with micro-second) precision. 44 * * Add IOCTL interface to easily get counters & configuration. 
 * --Ben Greear <greearb@candelatech.com>
 *
 * Renamed multiskb to clone_skb and cleaned up sending core for two distinct
 * skb modes. A clone_skb=0 mode for Ben "ranges" work and a clone_skb != 0
 * as a "fastpath" with a configurable number of clones after alloc's.
 * clone_skb=0 means all packets are allocated; this also means ranges, time
 * stamps etc can be used. clone_skb=100 means 1 malloc is followed by 100
 * clones.
 *
 * Also moved to /proc/net/pktgen/
 * --ro
 *
 * Sept 10: Fixed threading/locking. Lots of bone-headed and more clever
 * mistakes. Also merged in DaveM's patch in the -pre6 patch.
 * --Ben Greear <greearb@candelatech.com>
 *
 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
 *
 *
 * 021124 Finished major redesign and rewrite for new functionality.
 * See Documentation/networking/pktgen.txt for how to use this.
 *
 * The new operation:
 * For each CPU one thread/process is created at start. This process checks
 * for running devices in the if_list and sends packets until count is 0;
 * the thread also checks thread->control, which is used for inter-process
 * communication. The controlling process "posts" operations to the threads
 * this way. The if_lock should be possible to remove when add/rem_device is
 * merged into this too.
 *
 * By design there should only be *one* "controlling" process. In practice
 * multiple write accesses give unpredictable results. Understand that a
 * "write" to /proc yields a result code that should be read back by the
 * "writer". For practical use this should be no problem.
 *
 * Note: when adding devices to a specific CPU it is a good idea to also
 * assign /proc/irq/XX/smp_affinity so TX-interrupts get bound to the same
 * CPU.
82 * --ro 83 * 84 * Fix refcount off by one if first packet fails, potential null deref, 85 * memleak 030710- KJP 86 * 87 * First "ranges" functionality for ipv6 030726 --ro 88 * 89 * Included flow support. 030802 ANK. 90 * 91 * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org> 92 * 93 * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419 94 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604 95 * 96 * New xmit() return, do_div and misc clean up by Stephen Hemminger 97 * <shemminger@osdl.org> 040923 98 * 99 * Randy Dunlap fixed u64 printk compiler waring 100 * 101 * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org> 102 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213 103 * 104 * Corrections from Nikolai Malykh (nmalykh@bilim.com) 105 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230 106 * 107 * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com> 108 * 050103 109 * 110 * MPLS support by Steven Whitehouse <steve@chygwyn.com> 111 * 112 * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com> 113 * 114 * Fixed src_mac command to set source mac of packet to value specified in 115 * command by Adit Ranadive <adit.262@gmail.com> 116 * 117 */ 118 119 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 120 121 #include <linux/sys.h> 122 #include <linux/types.h> 123 #include <linux/module.h> 124 #include <linux/moduleparam.h> 125 #include <linux/kernel.h> 126 #include <linux/mutex.h> 127 #include <linux/sched.h> 128 #include <linux/slab.h> 129 #include <linux/vmalloc.h> 130 #include <linux/unistd.h> 131 #include <linux/string.h> 132 #include <linux/ptrace.h> 133 #include <linux/errno.h> 134 #include <linux/ioport.h> 135 #include <linux/interrupt.h> 136 #include <linux/capability.h> 137 #include <linux/hrtimer.h> 138 #include <linux/freezer.h> 139 #include <linux/delay.h> 140 #include <linux/timer.h> 141 #include <linux/list.h> 
142 #include <linux/init.h> 143 #include <linux/skbuff.h> 144 #include <linux/netdevice.h> 145 #include <linux/inet.h> 146 #include <linux/inetdevice.h> 147 #include <linux/rtnetlink.h> 148 #include <linux/if_arp.h> 149 #include <linux/if_vlan.h> 150 #include <linux/in.h> 151 #include <linux/ip.h> 152 #include <linux/ipv6.h> 153 #include <linux/udp.h> 154 #include <linux/proc_fs.h> 155 #include <linux/seq_file.h> 156 #include <linux/wait.h> 157 #include <linux/etherdevice.h> 158 #include <linux/kthread.h> 159 #include <linux/prefetch.h> 160 #include <net/net_namespace.h> 161 #include <net/checksum.h> 162 #include <net/ipv6.h> 163 #include <net/udp.h> 164 #include <net/ip6_checksum.h> 165 #include <net/addrconf.h> 166 #ifdef CONFIG_XFRM 167 #include <net/xfrm.h> 168 #endif 169 #include <net/netns/generic.h> 170 #include <asm/byteorder.h> 171 #include <linux/rcupdate.h> 172 #include <linux/bitops.h> 173 #include <linux/io.h> 174 #include <linux/timex.h> 175 #include <linux/uaccess.h> 176 #include <asm/dma.h> 177 #include <asm/div64.h> /* do_div */ 178 179 #define VERSION "2.74" 180 #define IP_NAME_SZ 32 181 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ 182 #define MPLS_STACK_BOTTOM htonl(0x00000100) 183 184 #define func_enter() pr_debug("entering %s\n", __func__); 185 186 /* Device flag bits */ 187 #define F_IPSRC_RND (1<<0) /* IP-Src Random */ 188 #define F_IPDST_RND (1<<1) /* IP-Dst Random */ 189 #define F_UDPSRC_RND (1<<2) /* UDP-Src Random */ 190 #define F_UDPDST_RND (1<<3) /* UDP-Dst Random */ 191 #define F_MACSRC_RND (1<<4) /* MAC-Src Random */ 192 #define F_MACDST_RND (1<<5) /* MAC-Dst Random */ 193 #define F_TXSIZE_RND (1<<6) /* Transmit size is random */ 194 #define F_IPV6 (1<<7) /* Interface in IPV6 Mode */ 195 #define F_MPLS_RND (1<<8) /* Random MPLS labels */ 196 #define F_VID_RND (1<<9) /* Random VLAN ID */ 197 #define F_SVID_RND (1<<10) /* Random SVLAN ID */ 198 #define F_FLOW_SEQ (1<<11) /* Sequential flows */ 199 #define 
F_IPSEC_ON (1<<12) /* ipsec on for flows */ 200 #define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ 201 #define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ 202 #define F_NODE (1<<15) /* Node memory alloc*/ 203 #define F_UDPCSUM (1<<16) /* Include UDP checksum */ 204 205 /* Thread control flag bits */ 206 #define T_STOP (1<<0) /* Stop run */ 207 #define T_RUN (1<<1) /* Start run */ 208 #define T_REMDEVALL (1<<2) /* Remove all devs */ 209 #define T_REMDEV (1<<3) /* Remove one dev */ 210 211 /* If lock -- can be removed after some work */ 212 #define if_lock(t) spin_lock(&(t->if_lock)); 213 #define if_unlock(t) spin_unlock(&(t->if_lock)); 214 215 /* Used to help with determining the pkts on receive */ 216 #define PKTGEN_MAGIC 0xbe9be955 217 #define PG_PROC_DIR "pktgen" 218 #define PGCTRL "pgctrl" 219 220 #define MAX_CFLOWS 65536 221 222 #define VLAN_TAG_SIZE(x) ((x)->vlan_id == 0xffff ? 0 : 4) 223 #define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4) 224 225 struct flow_state { 226 __be32 cur_daddr; 227 int count; 228 #ifdef CONFIG_XFRM 229 struct xfrm_state *x; 230 #endif 231 __u32 flags; 232 }; 233 234 /* flow flag bits */ 235 #define F_INIT (1<<0) /* flow has been initialized */ 236 237 struct pktgen_dev { 238 /* 239 * Try to keep frequent/infrequent used vars. separated. 240 */ 241 struct proc_dir_entry *entry; /* proc file */ 242 struct pktgen_thread *pg_thread;/* the owner */ 243 struct list_head list; /* chaining in the thread's run-queue */ 244 245 int running; /* if false, the test will stop */ 246 247 /* If min != max, then we will either do a linear iteration, or 248 * we will do a random selection from within the range. 
249 */ 250 __u32 flags; 251 int removal_mark; /* non-zero => the device is marked for 252 * removal by worker thread */ 253 254 int min_pkt_size; 255 int max_pkt_size; 256 int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */ 257 int nfrags; 258 struct page *page; 259 u64 delay; /* nano-seconds */ 260 261 __u64 count; /* Default No packets to send */ 262 __u64 sofar; /* How many pkts we've sent so far */ 263 __u64 tx_bytes; /* How many bytes we've transmitted */ 264 __u64 errors; /* Errors when trying to transmit, */ 265 266 /* runtime counters relating to clone_skb */ 267 268 __u64 allocated_skbs; 269 __u32 clone_count; 270 int last_ok; /* Was last skb sent? 271 * Or a failed transmit of some sort? 272 * This will keep sequence numbers in order 273 */ 274 ktime_t next_tx; 275 ktime_t started_at; 276 ktime_t stopped_at; 277 u64 idle_acc; /* nano-seconds */ 278 279 __u32 seq_num; 280 281 int clone_skb; /* 282 * Use multiple SKBs during packet gen. 283 * If this number is greater than 1, then 284 * that many copies of the same packet will be 285 * sent before a new packet is allocated. 286 * If you want to send 1024 identical packets 287 * before creating a new packet, 288 * set clone_skb to 1024. 289 */ 290 291 char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ 292 char dst_max[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ 293 char src_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ 294 char src_max[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ 295 296 struct in6_addr in6_saddr; 297 struct in6_addr in6_daddr; 298 struct in6_addr cur_in6_daddr; 299 struct in6_addr cur_in6_saddr; 300 /* For ranges */ 301 struct in6_addr min_in6_daddr; 302 struct in6_addr max_in6_daddr; 303 struct in6_addr min_in6_saddr; 304 struct in6_addr max_in6_saddr; 305 306 /* If we're doing ranges, random or incremental, then this 307 * defines the min/max for those ranges. 
308 */ 309 __be32 saddr_min; /* inclusive, source IP address */ 310 __be32 saddr_max; /* exclusive, source IP address */ 311 __be32 daddr_min; /* inclusive, dest IP address */ 312 __be32 daddr_max; /* exclusive, dest IP address */ 313 314 __u16 udp_src_min; /* inclusive, source UDP port */ 315 __u16 udp_src_max; /* exclusive, source UDP port */ 316 __u16 udp_dst_min; /* inclusive, dest UDP port */ 317 __u16 udp_dst_max; /* exclusive, dest UDP port */ 318 319 /* DSCP + ECN */ 320 __u8 tos; /* six MSB of (former) IPv4 TOS 321 are for dscp codepoint */ 322 __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 323 (see RFC 3260, sec. 4) */ 324 325 /* MPLS */ 326 unsigned int nr_labels; /* Depth of stack, 0 = no MPLS */ 327 __be32 labels[MAX_MPLS_LABELS]; 328 329 /* VLAN/SVLAN (802.1Q/Q-in-Q) */ 330 __u8 vlan_p; 331 __u8 vlan_cfi; 332 __u16 vlan_id; /* 0xffff means no vlan tag */ 333 334 __u8 svlan_p; 335 __u8 svlan_cfi; 336 __u16 svlan_id; /* 0xffff means no svlan tag */ 337 338 __u32 src_mac_count; /* How many MACs to iterate through */ 339 __u32 dst_mac_count; /* How many MACs to iterate through */ 340 341 unsigned char dst_mac[ETH_ALEN]; 342 unsigned char src_mac[ETH_ALEN]; 343 344 __u32 cur_dst_mac_offset; 345 __u32 cur_src_mac_offset; 346 __be32 cur_saddr; 347 __be32 cur_daddr; 348 __u16 ip_id; 349 __u16 cur_udp_dst; 350 __u16 cur_udp_src; 351 __u16 cur_queue_map; 352 __u32 cur_pkt_size; 353 __u32 last_pkt_size; 354 355 __u8 hh[14]; 356 /* = { 357 0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB, 358 359 We fill in SRC address later 360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 361 0x08, 0x00 362 }; 363 */ 364 __u16 pad; /* pad out the hh struct to an even 16 bytes */ 365 366 struct sk_buff *skb; /* skb we are to transmit next, used for when we 367 * are transmitting the same one multiple times 368 */ 369 struct net_device *odev; /* The out-going device. 370 * Note that the device should have it's 371 * pg_info pointer pointing back to this 372 * device. 
373 * Set when the user specifies the out-going 374 * device name (not when the inject is 375 * started as it used to do.) 376 */ 377 char odevname[32]; 378 struct flow_state *flows; 379 unsigned int cflows; /* Concurrent flows (config) */ 380 unsigned int lflow; /* Flow length (config) */ 381 unsigned int nflows; /* accumulated flows (stats) */ 382 unsigned int curfl; /* current sequenced flow (state)*/ 383 384 u16 queue_map_min; 385 u16 queue_map_max; 386 __u32 skb_priority; /* skb priority field */ 387 int node; /* Memory node */ 388 389 #ifdef CONFIG_XFRM 390 __u8 ipsmode; /* IPSEC mode (config) */ 391 __u8 ipsproto; /* IPSEC type (config) */ 392 __u32 spi; 393 struct dst_entry dst; 394 struct dst_ops dstops; 395 #endif 396 char result[512]; 397 }; 398 399 struct pktgen_hdr { 400 __be32 pgh_magic; 401 __be32 seq_num; 402 __be32 tv_sec; 403 __be32 tv_usec; 404 }; 405 406 407 static int pg_net_id __read_mostly; 408 409 struct pktgen_net { 410 struct net *net; 411 struct proc_dir_entry *proc_dir; 412 struct list_head pktgen_threads; 413 bool pktgen_exiting; 414 }; 415 416 struct pktgen_thread { 417 spinlock_t if_lock; /* for list of devices */ 418 struct list_head if_list; /* All device here */ 419 struct list_head th_list; 420 struct task_struct *tsk; 421 char result[512]; 422 423 /* Field for thread to receive "posted" events terminate, 424 stop ifs etc. */ 425 426 u32 control; 427 int cpu; 428 429 wait_queue_head_t queue; 430 struct completion start_done; 431 struct pktgen_net *net; 432 }; 433 434 #define REMOVE 1 435 #define FIND 0 436 437 static const char version[] = 438 "Packet Generator for packet performance testing. 
" 439 "Version: " VERSION "\n"; 440 441 static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); 442 static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); 443 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 444 const char *ifname, bool exact); 445 static int pktgen_device_event(struct notifier_block *, unsigned long, void *); 446 static void pktgen_run_all_threads(struct pktgen_net *pn); 447 static void pktgen_reset_all_threads(struct pktgen_net *pn); 448 static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn); 449 450 static void pktgen_stop(struct pktgen_thread *t); 451 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); 452 453 /* Module parameters, defaults. */ 454 static int pg_count_d __read_mostly = 1000; 455 static int pg_delay_d __read_mostly; 456 static int pg_clone_skb_d __read_mostly; 457 static int debug __read_mostly; 458 459 static DEFINE_MUTEX(pktgen_thread_lock); 460 461 static struct notifier_block pktgen_notifier_block = { 462 .notifier_call = pktgen_device_event, 463 }; 464 465 /* 466 * /proc handling functions 467 * 468 */ 469 470 static int pgctrl_show(struct seq_file *seq, void *v) 471 { 472 seq_puts(seq, version); 473 return 0; 474 } 475 476 static ssize_t pgctrl_write(struct file *file, const char __user *buf, 477 size_t count, loff_t *ppos) 478 { 479 int err = 0; 480 char data[128]; 481 struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id); 482 483 if (!capable(CAP_NET_ADMIN)) { 484 err = -EPERM; 485 goto out; 486 } 487 488 if (count > sizeof(data)) 489 count = sizeof(data); 490 491 if (copy_from_user(data, buf, count)) { 492 err = -EFAULT; 493 goto out; 494 } 495 data[count - 1] = 0; /* Make string */ 496 497 if (!strcmp(data, "stop")) 498 pktgen_stop_all_threads_ifs(pn); 499 500 else if (!strcmp(data, "start")) 501 pktgen_run_all_threads(pn); 502 503 else if (!strcmp(data, "reset")) 504 pktgen_reset_all_threads(pn); 505 506 else 507 
pr_warning("Unknown command: %s\n", data); 508 509 err = count; 510 511 out: 512 return err; 513 } 514 515 static int pgctrl_open(struct inode *inode, struct file *file) 516 { 517 return single_open(file, pgctrl_show, PDE_DATA(inode)); 518 } 519 520 static const struct file_operations pktgen_fops = { 521 .owner = THIS_MODULE, 522 .open = pgctrl_open, 523 .read = seq_read, 524 .llseek = seq_lseek, 525 .write = pgctrl_write, 526 .release = single_release, 527 }; 528 529 static int pktgen_if_show(struct seq_file *seq, void *v) 530 { 531 const struct pktgen_dev *pkt_dev = seq->private; 532 ktime_t stopped; 533 u64 idle; 534 535 seq_printf(seq, 536 "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", 537 (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size, 538 pkt_dev->max_pkt_size); 539 540 seq_printf(seq, 541 " frags: %d delay: %llu clone_skb: %d ifname: %s\n", 542 pkt_dev->nfrags, (unsigned long long) pkt_dev->delay, 543 pkt_dev->clone_skb, pkt_dev->odevname); 544 545 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, 546 pkt_dev->lflow); 547 548 seq_printf(seq, 549 " queue_map_min: %u queue_map_max: %u\n", 550 pkt_dev->queue_map_min, 551 pkt_dev->queue_map_max); 552 553 if (pkt_dev->skb_priority) 554 seq_printf(seq, " skb_priority: %u\n", 555 pkt_dev->skb_priority); 556 557 if (pkt_dev->flags & F_IPV6) { 558 seq_printf(seq, 559 " saddr: %pI6c min_saddr: %pI6c max_saddr: %pI6c\n" 560 " daddr: %pI6c min_daddr: %pI6c max_daddr: %pI6c\n", 561 &pkt_dev->in6_saddr, 562 &pkt_dev->min_in6_saddr, &pkt_dev->max_in6_saddr, 563 &pkt_dev->in6_daddr, 564 &pkt_dev->min_in6_daddr, &pkt_dev->max_in6_daddr); 565 } else { 566 seq_printf(seq, 567 " dst_min: %s dst_max: %s\n", 568 pkt_dev->dst_min, pkt_dev->dst_max); 569 seq_printf(seq, 570 " src_min: %s src_max: %s\n", 571 pkt_dev->src_min, pkt_dev->src_max); 572 } 573 574 seq_puts(seq, " src_mac: "); 575 576 seq_printf(seq, "%pM ", 577 is_zero_ether_addr(pkt_dev->src_mac) ? 
578 pkt_dev->odev->dev_addr : pkt_dev->src_mac); 579 580 seq_printf(seq, "dst_mac: "); 581 seq_printf(seq, "%pM\n", pkt_dev->dst_mac); 582 583 seq_printf(seq, 584 " udp_src_min: %d udp_src_max: %d" 585 " udp_dst_min: %d udp_dst_max: %d\n", 586 pkt_dev->udp_src_min, pkt_dev->udp_src_max, 587 pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); 588 589 seq_printf(seq, 590 " src_mac_count: %d dst_mac_count: %d\n", 591 pkt_dev->src_mac_count, pkt_dev->dst_mac_count); 592 593 if (pkt_dev->nr_labels) { 594 unsigned int i; 595 seq_printf(seq, " mpls: "); 596 for (i = 0; i < pkt_dev->nr_labels; i++) 597 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), 598 i == pkt_dev->nr_labels-1 ? "\n" : ", "); 599 } 600 601 if (pkt_dev->vlan_id != 0xffff) 602 seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n", 603 pkt_dev->vlan_id, pkt_dev->vlan_p, 604 pkt_dev->vlan_cfi); 605 606 if (pkt_dev->svlan_id != 0xffff) 607 seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n", 608 pkt_dev->svlan_id, pkt_dev->svlan_p, 609 pkt_dev->svlan_cfi); 610 611 if (pkt_dev->tos) 612 seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos); 613 614 if (pkt_dev->traffic_class) 615 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); 616 617 if (pkt_dev->node >= 0) 618 seq_printf(seq, " node: %d\n", pkt_dev->node); 619 620 seq_printf(seq, " Flags: "); 621 622 if (pkt_dev->flags & F_IPV6) 623 seq_printf(seq, "IPV6 "); 624 625 if (pkt_dev->flags & F_IPSRC_RND) 626 seq_printf(seq, "IPSRC_RND "); 627 628 if (pkt_dev->flags & F_IPDST_RND) 629 seq_printf(seq, "IPDST_RND "); 630 631 if (pkt_dev->flags & F_TXSIZE_RND) 632 seq_printf(seq, "TXSIZE_RND "); 633 634 if (pkt_dev->flags & F_UDPSRC_RND) 635 seq_printf(seq, "UDPSRC_RND "); 636 637 if (pkt_dev->flags & F_UDPDST_RND) 638 seq_printf(seq, "UDPDST_RND "); 639 640 if (pkt_dev->flags & F_UDPCSUM) 641 seq_printf(seq, "UDPCSUM "); 642 643 if (pkt_dev->flags & F_MPLS_RND) 644 seq_printf(seq, "MPLS_RND "); 645 646 if (pkt_dev->flags & F_QUEUE_MAP_RND) 647 
seq_printf(seq, "QUEUE_MAP_RND "); 648 649 if (pkt_dev->flags & F_QUEUE_MAP_CPU) 650 seq_printf(seq, "QUEUE_MAP_CPU "); 651 652 if (pkt_dev->cflows) { 653 if (pkt_dev->flags & F_FLOW_SEQ) 654 seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/ 655 else 656 seq_printf(seq, "FLOW_RND "); 657 } 658 659 #ifdef CONFIG_XFRM 660 if (pkt_dev->flags & F_IPSEC_ON) { 661 seq_printf(seq, "IPSEC "); 662 if (pkt_dev->spi) 663 seq_printf(seq, "spi:%u", pkt_dev->spi); 664 } 665 #endif 666 667 if (pkt_dev->flags & F_MACSRC_RND) 668 seq_printf(seq, "MACSRC_RND "); 669 670 if (pkt_dev->flags & F_MACDST_RND) 671 seq_printf(seq, "MACDST_RND "); 672 673 if (pkt_dev->flags & F_VID_RND) 674 seq_printf(seq, "VID_RND "); 675 676 if (pkt_dev->flags & F_SVID_RND) 677 seq_printf(seq, "SVID_RND "); 678 679 if (pkt_dev->flags & F_NODE) 680 seq_printf(seq, "NODE_ALLOC "); 681 682 seq_puts(seq, "\n"); 683 684 /* not really stopped, more like last-running-at */ 685 stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at; 686 idle = pkt_dev->idle_acc; 687 do_div(idle, NSEC_PER_USEC); 688 689 seq_printf(seq, 690 "Current:\n pkts-sofar: %llu errors: %llu\n", 691 (unsigned long long)pkt_dev->sofar, 692 (unsigned long long)pkt_dev->errors); 693 694 seq_printf(seq, 695 " started: %lluus stopped: %lluus idle: %lluus\n", 696 (unsigned long long) ktime_to_us(pkt_dev->started_at), 697 (unsigned long long) ktime_to_us(stopped), 698 (unsigned long long) idle); 699 700 seq_printf(seq, 701 " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", 702 pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, 703 pkt_dev->cur_src_mac_offset); 704 705 if (pkt_dev->flags & F_IPV6) { 706 seq_printf(seq, " cur_saddr: %pI6c cur_daddr: %pI6c\n", 707 &pkt_dev->cur_in6_saddr, 708 &pkt_dev->cur_in6_daddr); 709 } else 710 seq_printf(seq, " cur_saddr: %pI4 cur_daddr: %pI4\n", 711 &pkt_dev->cur_saddr, &pkt_dev->cur_daddr); 712 713 seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n", 714 pkt_dev->cur_udp_dst, 
pkt_dev->cur_udp_src); 715 716 seq_printf(seq, " cur_queue_map: %u\n", pkt_dev->cur_queue_map); 717 718 seq_printf(seq, " flows: %u\n", pkt_dev->nflows); 719 720 if (pkt_dev->result[0]) 721 seq_printf(seq, "Result: %s\n", pkt_dev->result); 722 else 723 seq_printf(seq, "Result: Idle\n"); 724 725 return 0; 726 } 727 728 729 static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, 730 __u32 *num) 731 { 732 int i = 0; 733 *num = 0; 734 735 for (; i < maxlen; i++) { 736 int value; 737 char c; 738 *num <<= 4; 739 if (get_user(c, &user_buffer[i])) 740 return -EFAULT; 741 value = hex_to_bin(c); 742 if (value >= 0) 743 *num |= value; 744 else 745 break; 746 } 747 return i; 748 } 749 750 static int count_trail_chars(const char __user * user_buffer, 751 unsigned int maxlen) 752 { 753 int i; 754 755 for (i = 0; i < maxlen; i++) { 756 char c; 757 if (get_user(c, &user_buffer[i])) 758 return -EFAULT; 759 switch (c) { 760 case '\"': 761 case '\n': 762 case '\r': 763 case '\t': 764 case ' ': 765 case '=': 766 break; 767 default: 768 goto done; 769 } 770 } 771 done: 772 return i; 773 } 774 775 static long num_arg(const char __user *user_buffer, unsigned long maxlen, 776 unsigned long *num) 777 { 778 int i; 779 *num = 0; 780 781 for (i = 0; i < maxlen; i++) { 782 char c; 783 if (get_user(c, &user_buffer[i])) 784 return -EFAULT; 785 if ((c >= '0') && (c <= '9')) { 786 *num *= 10; 787 *num += c - '0'; 788 } else 789 break; 790 } 791 return i; 792 } 793 794 static int strn_len(const char __user * user_buffer, unsigned int maxlen) 795 { 796 int i; 797 798 for (i = 0; i < maxlen; i++) { 799 char c; 800 if (get_user(c, &user_buffer[i])) 801 return -EFAULT; 802 switch (c) { 803 case '\"': 804 case '\n': 805 case '\r': 806 case '\t': 807 case ' ': 808 goto done_str; 809 break; 810 default: 811 break; 812 } 813 } 814 done_str: 815 return i; 816 } 817 818 static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) 819 { 820 unsigned int n = 0; 821 char 
c; 822 ssize_t i = 0; 823 int len; 824 825 pkt_dev->nr_labels = 0; 826 do { 827 __u32 tmp; 828 len = hex32_arg(&buffer[i], 8, &tmp); 829 if (len <= 0) 830 return len; 831 pkt_dev->labels[n] = htonl(tmp); 832 if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM) 833 pkt_dev->flags |= F_MPLS_RND; 834 i += len; 835 if (get_user(c, &buffer[i])) 836 return -EFAULT; 837 i++; 838 n++; 839 if (n >= MAX_MPLS_LABELS) 840 return -E2BIG; 841 } while (c == ','); 842 843 pkt_dev->nr_labels = n; 844 return i; 845 } 846 847 static ssize_t pktgen_if_write(struct file *file, 848 const char __user * user_buffer, size_t count, 849 loff_t * offset) 850 { 851 struct seq_file *seq = file->private_data; 852 struct pktgen_dev *pkt_dev = seq->private; 853 int i, max, len; 854 char name[16], valstr[32]; 855 unsigned long value = 0; 856 char *pg_result = NULL; 857 int tmp = 0; 858 char buf[128]; 859 860 pg_result = &(pkt_dev->result[0]); 861 862 if (count < 1) { 863 pr_warning("wrong command format\n"); 864 return -EINVAL; 865 } 866 867 max = count; 868 tmp = count_trail_chars(user_buffer, max); 869 if (tmp < 0) { 870 pr_warning("illegal format\n"); 871 return tmp; 872 } 873 i = tmp; 874 875 /* Read variable name */ 876 877 len = strn_len(&user_buffer[i], sizeof(name) - 1); 878 if (len < 0) 879 return len; 880 881 memset(name, 0, sizeof(name)); 882 if (copy_from_user(name, &user_buffer[i], len)) 883 return -EFAULT; 884 i += len; 885 886 max = count - i; 887 len = count_trail_chars(&user_buffer[i], max); 888 if (len < 0) 889 return len; 890 891 i += len; 892 893 if (debug) { 894 size_t copy = min_t(size_t, count, 1023); 895 char tb[copy + 1]; 896 if (copy_from_user(tb, user_buffer, copy)) 897 return -EFAULT; 898 tb[copy] = 0; 899 pr_debug("%s,%lu buffer -:%s:-\n", 900 name, (unsigned long)count, tb); 901 } 902 903 if (!strcmp(name, "min_pkt_size")) { 904 len = num_arg(&user_buffer[i], 10, &value); 905 if (len < 0) 906 return len; 907 908 i += len; 909 if (value < 14 + 20 + 8) 910 value = 14 + 20 + 8; 
911 if (value != pkt_dev->min_pkt_size) { 912 pkt_dev->min_pkt_size = value; 913 pkt_dev->cur_pkt_size = value; 914 } 915 sprintf(pg_result, "OK: min_pkt_size=%u", 916 pkt_dev->min_pkt_size); 917 return count; 918 } 919 920 if (!strcmp(name, "max_pkt_size")) { 921 len = num_arg(&user_buffer[i], 10, &value); 922 if (len < 0) 923 return len; 924 925 i += len; 926 if (value < 14 + 20 + 8) 927 value = 14 + 20 + 8; 928 if (value != pkt_dev->max_pkt_size) { 929 pkt_dev->max_pkt_size = value; 930 pkt_dev->cur_pkt_size = value; 931 } 932 sprintf(pg_result, "OK: max_pkt_size=%u", 933 pkt_dev->max_pkt_size); 934 return count; 935 } 936 937 /* Shortcut for min = max */ 938 939 if (!strcmp(name, "pkt_size")) { 940 len = num_arg(&user_buffer[i], 10, &value); 941 if (len < 0) 942 return len; 943 944 i += len; 945 if (value < 14 + 20 + 8) 946 value = 14 + 20 + 8; 947 if (value != pkt_dev->min_pkt_size) { 948 pkt_dev->min_pkt_size = value; 949 pkt_dev->max_pkt_size = value; 950 pkt_dev->cur_pkt_size = value; 951 } 952 sprintf(pg_result, "OK: pkt_size=%u", pkt_dev->min_pkt_size); 953 return count; 954 } 955 956 if (!strcmp(name, "debug")) { 957 len = num_arg(&user_buffer[i], 10, &value); 958 if (len < 0) 959 return len; 960 961 i += len; 962 debug = value; 963 sprintf(pg_result, "OK: debug=%u", debug); 964 return count; 965 } 966 967 if (!strcmp(name, "frags")) { 968 len = num_arg(&user_buffer[i], 10, &value); 969 if (len < 0) 970 return len; 971 972 i += len; 973 pkt_dev->nfrags = value; 974 sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags); 975 return count; 976 } 977 if (!strcmp(name, "delay")) { 978 len = num_arg(&user_buffer[i], 10, &value); 979 if (len < 0) 980 return len; 981 982 i += len; 983 if (value == 0x7FFFFFFF) 984 pkt_dev->delay = ULLONG_MAX; 985 else 986 pkt_dev->delay = (u64)value; 987 988 sprintf(pg_result, "OK: delay=%llu", 989 (unsigned long long) pkt_dev->delay); 990 return count; 991 } 992 if (!strcmp(name, "rate")) { 993 len = num_arg(&user_buffer[i], 10, 
&value); 994 if (len < 0) 995 return len; 996 997 i += len; 998 if (!value) 999 return len; 1000 pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value; 1001 if (debug) 1002 pr_info("Delay set at: %llu ns\n", pkt_dev->delay); 1003 1004 sprintf(pg_result, "OK: rate=%lu", value); 1005 return count; 1006 } 1007 if (!strcmp(name, "ratep")) { 1008 len = num_arg(&user_buffer[i], 10, &value); 1009 if (len < 0) 1010 return len; 1011 1012 i += len; 1013 if (!value) 1014 return len; 1015 pkt_dev->delay = NSEC_PER_SEC/value; 1016 if (debug) 1017 pr_info("Delay set at: %llu ns\n", pkt_dev->delay); 1018 1019 sprintf(pg_result, "OK: rate=%lu", value); 1020 return count; 1021 } 1022 if (!strcmp(name, "udp_src_min")) { 1023 len = num_arg(&user_buffer[i], 10, &value); 1024 if (len < 0) 1025 return len; 1026 1027 i += len; 1028 if (value != pkt_dev->udp_src_min) { 1029 pkt_dev->udp_src_min = value; 1030 pkt_dev->cur_udp_src = value; 1031 } 1032 sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min); 1033 return count; 1034 } 1035 if (!strcmp(name, "udp_dst_min")) { 1036 len = num_arg(&user_buffer[i], 10, &value); 1037 if (len < 0) 1038 return len; 1039 1040 i += len; 1041 if (value != pkt_dev->udp_dst_min) { 1042 pkt_dev->udp_dst_min = value; 1043 pkt_dev->cur_udp_dst = value; 1044 } 1045 sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min); 1046 return count; 1047 } 1048 if (!strcmp(name, "udp_src_max")) { 1049 len = num_arg(&user_buffer[i], 10, &value); 1050 if (len < 0) 1051 return len; 1052 1053 i += len; 1054 if (value != pkt_dev->udp_src_max) { 1055 pkt_dev->udp_src_max = value; 1056 pkt_dev->cur_udp_src = value; 1057 } 1058 sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max); 1059 return count; 1060 } 1061 if (!strcmp(name, "udp_dst_max")) { 1062 len = num_arg(&user_buffer[i], 10, &value); 1063 if (len < 0) 1064 return len; 1065 1066 i += len; 1067 if (value != pkt_dev->udp_dst_max) { 1068 pkt_dev->udp_dst_max = value; 1069 
pkt_dev->cur_udp_dst = value; 1070 } 1071 sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max); 1072 return count; 1073 } 1074 if (!strcmp(name, "clone_skb")) { 1075 len = num_arg(&user_buffer[i], 10, &value); 1076 if (len < 0) 1077 return len; 1078 if ((value > 0) && 1079 (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING))) 1080 return -ENOTSUPP; 1081 i += len; 1082 pkt_dev->clone_skb = value; 1083 1084 sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb); 1085 return count; 1086 } 1087 if (!strcmp(name, "count")) { 1088 len = num_arg(&user_buffer[i], 10, &value); 1089 if (len < 0) 1090 return len; 1091 1092 i += len; 1093 pkt_dev->count = value; 1094 sprintf(pg_result, "OK: count=%llu", 1095 (unsigned long long)pkt_dev->count); 1096 return count; 1097 } 1098 if (!strcmp(name, "src_mac_count")) { 1099 len = num_arg(&user_buffer[i], 10, &value); 1100 if (len < 0) 1101 return len; 1102 1103 i += len; 1104 if (pkt_dev->src_mac_count != value) { 1105 pkt_dev->src_mac_count = value; 1106 pkt_dev->cur_src_mac_offset = 0; 1107 } 1108 sprintf(pg_result, "OK: src_mac_count=%d", 1109 pkt_dev->src_mac_count); 1110 return count; 1111 } 1112 if (!strcmp(name, "dst_mac_count")) { 1113 len = num_arg(&user_buffer[i], 10, &value); 1114 if (len < 0) 1115 return len; 1116 1117 i += len; 1118 if (pkt_dev->dst_mac_count != value) { 1119 pkt_dev->dst_mac_count = value; 1120 pkt_dev->cur_dst_mac_offset = 0; 1121 } 1122 sprintf(pg_result, "OK: dst_mac_count=%d", 1123 pkt_dev->dst_mac_count); 1124 return count; 1125 } 1126 if (!strcmp(name, "node")) { 1127 len = num_arg(&user_buffer[i], 10, &value); 1128 if (len < 0) 1129 return len; 1130 1131 i += len; 1132 1133 if (node_possible(value)) { 1134 pkt_dev->node = value; 1135 sprintf(pg_result, "OK: node=%d", pkt_dev->node); 1136 if (pkt_dev->page) { 1137 put_page(pkt_dev->page); 1138 pkt_dev->page = NULL; 1139 } 1140 } 1141 else 1142 sprintf(pg_result, "ERROR: node not possible"); 1143 return count; 1144 } 1145 if 
(!strcmp(name, "flag")) { 1146 char f[32]; 1147 memset(f, 0, 32); 1148 len = strn_len(&user_buffer[i], sizeof(f) - 1); 1149 if (len < 0) 1150 return len; 1151 1152 if (copy_from_user(f, &user_buffer[i], len)) 1153 return -EFAULT; 1154 i += len; 1155 if (strcmp(f, "IPSRC_RND") == 0) 1156 pkt_dev->flags |= F_IPSRC_RND; 1157 1158 else if (strcmp(f, "!IPSRC_RND") == 0) 1159 pkt_dev->flags &= ~F_IPSRC_RND; 1160 1161 else if (strcmp(f, "TXSIZE_RND") == 0) 1162 pkt_dev->flags |= F_TXSIZE_RND; 1163 1164 else if (strcmp(f, "!TXSIZE_RND") == 0) 1165 pkt_dev->flags &= ~F_TXSIZE_RND; 1166 1167 else if (strcmp(f, "IPDST_RND") == 0) 1168 pkt_dev->flags |= F_IPDST_RND; 1169 1170 else if (strcmp(f, "!IPDST_RND") == 0) 1171 pkt_dev->flags &= ~F_IPDST_RND; 1172 1173 else if (strcmp(f, "UDPSRC_RND") == 0) 1174 pkt_dev->flags |= F_UDPSRC_RND; 1175 1176 else if (strcmp(f, "!UDPSRC_RND") == 0) 1177 pkt_dev->flags &= ~F_UDPSRC_RND; 1178 1179 else if (strcmp(f, "UDPDST_RND") == 0) 1180 pkt_dev->flags |= F_UDPDST_RND; 1181 1182 else if (strcmp(f, "!UDPDST_RND") == 0) 1183 pkt_dev->flags &= ~F_UDPDST_RND; 1184 1185 else if (strcmp(f, "MACSRC_RND") == 0) 1186 pkt_dev->flags |= F_MACSRC_RND; 1187 1188 else if (strcmp(f, "!MACSRC_RND") == 0) 1189 pkt_dev->flags &= ~F_MACSRC_RND; 1190 1191 else if (strcmp(f, "MACDST_RND") == 0) 1192 pkt_dev->flags |= F_MACDST_RND; 1193 1194 else if (strcmp(f, "!MACDST_RND") == 0) 1195 pkt_dev->flags &= ~F_MACDST_RND; 1196 1197 else if (strcmp(f, "MPLS_RND") == 0) 1198 pkt_dev->flags |= F_MPLS_RND; 1199 1200 else if (strcmp(f, "!MPLS_RND") == 0) 1201 pkt_dev->flags &= ~F_MPLS_RND; 1202 1203 else if (strcmp(f, "VID_RND") == 0) 1204 pkt_dev->flags |= F_VID_RND; 1205 1206 else if (strcmp(f, "!VID_RND") == 0) 1207 pkt_dev->flags &= ~F_VID_RND; 1208 1209 else if (strcmp(f, "SVID_RND") == 0) 1210 pkt_dev->flags |= F_SVID_RND; 1211 1212 else if (strcmp(f, "!SVID_RND") == 0) 1213 pkt_dev->flags &= ~F_SVID_RND; 1214 1215 else if (strcmp(f, "FLOW_SEQ") == 0) 1216 
pkt_dev->flags |= F_FLOW_SEQ; 1217 1218 else if (strcmp(f, "QUEUE_MAP_RND") == 0) 1219 pkt_dev->flags |= F_QUEUE_MAP_RND; 1220 1221 else if (strcmp(f, "!QUEUE_MAP_RND") == 0) 1222 pkt_dev->flags &= ~F_QUEUE_MAP_RND; 1223 1224 else if (strcmp(f, "QUEUE_MAP_CPU") == 0) 1225 pkt_dev->flags |= F_QUEUE_MAP_CPU; 1226 1227 else if (strcmp(f, "!QUEUE_MAP_CPU") == 0) 1228 pkt_dev->flags &= ~F_QUEUE_MAP_CPU; 1229 #ifdef CONFIG_XFRM 1230 else if (strcmp(f, "IPSEC") == 0) 1231 pkt_dev->flags |= F_IPSEC_ON; 1232 #endif 1233 1234 else if (strcmp(f, "!IPV6") == 0) 1235 pkt_dev->flags &= ~F_IPV6; 1236 1237 else if (strcmp(f, "NODE_ALLOC") == 0) 1238 pkt_dev->flags |= F_NODE; 1239 1240 else if (strcmp(f, "!NODE_ALLOC") == 0) 1241 pkt_dev->flags &= ~F_NODE; 1242 1243 else if (strcmp(f, "UDPCSUM") == 0) 1244 pkt_dev->flags |= F_UDPCSUM; 1245 1246 else if (strcmp(f, "!UDPCSUM") == 0) 1247 pkt_dev->flags &= ~F_UDPCSUM; 1248 1249 else { 1250 sprintf(pg_result, 1251 "Flag -:%s:- unknown\nAvailable flags, (prepend ! 
to un-set flag):\n%s", 1252 f, 1253 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " 1254 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n"); 1255 return count; 1256 } 1257 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); 1258 return count; 1259 } 1260 if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { 1261 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); 1262 if (len < 0) 1263 return len; 1264 1265 if (copy_from_user(buf, &user_buffer[i], len)) 1266 return -EFAULT; 1267 buf[len] = 0; 1268 if (strcmp(buf, pkt_dev->dst_min) != 0) { 1269 memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min)); 1270 strncpy(pkt_dev->dst_min, buf, len); 1271 pkt_dev->daddr_min = in_aton(pkt_dev->dst_min); 1272 pkt_dev->cur_daddr = pkt_dev->daddr_min; 1273 } 1274 if (debug) 1275 pr_debug("dst_min set to: %s\n", pkt_dev->dst_min); 1276 i += len; 1277 sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min); 1278 return count; 1279 } 1280 if (!strcmp(name, "dst_max")) { 1281 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); 1282 if (len < 0) 1283 return len; 1284 1285 1286 if (copy_from_user(buf, &user_buffer[i], len)) 1287 return -EFAULT; 1288 1289 buf[len] = 0; 1290 if (strcmp(buf, pkt_dev->dst_max) != 0) { 1291 memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max)); 1292 strncpy(pkt_dev->dst_max, buf, len); 1293 pkt_dev->daddr_max = in_aton(pkt_dev->dst_max); 1294 pkt_dev->cur_daddr = pkt_dev->daddr_max; 1295 } 1296 if (debug) 1297 pr_debug("dst_max set to: %s\n", pkt_dev->dst_max); 1298 i += len; 1299 sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max); 1300 return count; 1301 } 1302 if (!strcmp(name, "dst6")) { 1303 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1304 if (len < 0) 1305 return len; 1306 1307 pkt_dev->flags |= F_IPV6; 1308 1309 if (copy_from_user(buf, &user_buffer[i], len)) 1310 return -EFAULT; 1311 buf[len] = 0; 1312 1313 in6_pton(buf, -1, pkt_dev->in6_daddr.s6_addr, -1, NULL); 
1314 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr); 1315 1316 pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr; 1317 1318 if (debug) 1319 pr_debug("dst6 set to: %s\n", buf); 1320 1321 i += len; 1322 sprintf(pg_result, "OK: dst6=%s", buf); 1323 return count; 1324 } 1325 if (!strcmp(name, "dst6_min")) { 1326 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1327 if (len < 0) 1328 return len; 1329 1330 pkt_dev->flags |= F_IPV6; 1331 1332 if (copy_from_user(buf, &user_buffer[i], len)) 1333 return -EFAULT; 1334 buf[len] = 0; 1335 1336 in6_pton(buf, -1, pkt_dev->min_in6_daddr.s6_addr, -1, NULL); 1337 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr); 1338 1339 pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr; 1340 if (debug) 1341 pr_debug("dst6_min set to: %s\n", buf); 1342 1343 i += len; 1344 sprintf(pg_result, "OK: dst6_min=%s", buf); 1345 return count; 1346 } 1347 if (!strcmp(name, "dst6_max")) { 1348 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1349 if (len < 0) 1350 return len; 1351 1352 pkt_dev->flags |= F_IPV6; 1353 1354 if (copy_from_user(buf, &user_buffer[i], len)) 1355 return -EFAULT; 1356 buf[len] = 0; 1357 1358 in6_pton(buf, -1, pkt_dev->max_in6_daddr.s6_addr, -1, NULL); 1359 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr); 1360 1361 if (debug) 1362 pr_debug("dst6_max set to: %s\n", buf); 1363 1364 i += len; 1365 sprintf(pg_result, "OK: dst6_max=%s", buf); 1366 return count; 1367 } 1368 if (!strcmp(name, "src6")) { 1369 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1370 if (len < 0) 1371 return len; 1372 1373 pkt_dev->flags |= F_IPV6; 1374 1375 if (copy_from_user(buf, &user_buffer[i], len)) 1376 return -EFAULT; 1377 buf[len] = 0; 1378 1379 in6_pton(buf, -1, pkt_dev->in6_saddr.s6_addr, -1, NULL); 1380 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr); 1381 1382 pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr; 1383 1384 if (debug) 1385 pr_debug("src6 set to: %s\n", buf); 1386 1387 i += len; 1388 sprintf(pg_result, "OK: 
src6=%s", buf); 1389 return count; 1390 } 1391 if (!strcmp(name, "src_min")) { 1392 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); 1393 if (len < 0) 1394 return len; 1395 1396 if (copy_from_user(buf, &user_buffer[i], len)) 1397 return -EFAULT; 1398 buf[len] = 0; 1399 if (strcmp(buf, pkt_dev->src_min) != 0) { 1400 memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min)); 1401 strncpy(pkt_dev->src_min, buf, len); 1402 pkt_dev->saddr_min = in_aton(pkt_dev->src_min); 1403 pkt_dev->cur_saddr = pkt_dev->saddr_min; 1404 } 1405 if (debug) 1406 pr_debug("src_min set to: %s\n", pkt_dev->src_min); 1407 i += len; 1408 sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min); 1409 return count; 1410 } 1411 if (!strcmp(name, "src_max")) { 1412 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); 1413 if (len < 0) 1414 return len; 1415 1416 if (copy_from_user(buf, &user_buffer[i], len)) 1417 return -EFAULT; 1418 buf[len] = 0; 1419 if (strcmp(buf, pkt_dev->src_max) != 0) { 1420 memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max)); 1421 strncpy(pkt_dev->src_max, buf, len); 1422 pkt_dev->saddr_max = in_aton(pkt_dev->src_max); 1423 pkt_dev->cur_saddr = pkt_dev->saddr_max; 1424 } 1425 if (debug) 1426 pr_debug("src_max set to: %s\n", pkt_dev->src_max); 1427 i += len; 1428 sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max); 1429 return count; 1430 } 1431 if (!strcmp(name, "dst_mac")) { 1432 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1433 if (len < 0) 1434 return len; 1435 1436 memset(valstr, 0, sizeof(valstr)); 1437 if (copy_from_user(valstr, &user_buffer[i], len)) 1438 return -EFAULT; 1439 1440 if (!mac_pton(valstr, pkt_dev->dst_mac)) 1441 return -EINVAL; 1442 /* Set up Dest MAC */ 1443 ether_addr_copy(&pkt_dev->hh[0], pkt_dev->dst_mac); 1444 1445 sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac); 1446 return count; 1447 } 1448 if (!strcmp(name, "src_mac")) { 1449 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1450 if (len < 0) 1451 return 
len; 1452 1453 memset(valstr, 0, sizeof(valstr)); 1454 if (copy_from_user(valstr, &user_buffer[i], len)) 1455 return -EFAULT; 1456 1457 if (!mac_pton(valstr, pkt_dev->src_mac)) 1458 return -EINVAL; 1459 /* Set up Src MAC */ 1460 ether_addr_copy(&pkt_dev->hh[6], pkt_dev->src_mac); 1461 1462 sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac); 1463 return count; 1464 } 1465 1466 if (!strcmp(name, "clear_counters")) { 1467 pktgen_clear_counters(pkt_dev); 1468 sprintf(pg_result, "OK: Clearing counters.\n"); 1469 return count; 1470 } 1471 1472 if (!strcmp(name, "flows")) { 1473 len = num_arg(&user_buffer[i], 10, &value); 1474 if (len < 0) 1475 return len; 1476 1477 i += len; 1478 if (value > MAX_CFLOWS) 1479 value = MAX_CFLOWS; 1480 1481 pkt_dev->cflows = value; 1482 sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows); 1483 return count; 1484 } 1485 #ifdef CONFIG_XFRM 1486 if (!strcmp(name, "spi")) { 1487 len = num_arg(&user_buffer[i], 10, &value); 1488 if (len < 0) 1489 return len; 1490 1491 i += len; 1492 pkt_dev->spi = value; 1493 sprintf(pg_result, "OK: spi=%u", pkt_dev->spi); 1494 return count; 1495 } 1496 #endif 1497 if (!strcmp(name, "flowlen")) { 1498 len = num_arg(&user_buffer[i], 10, &value); 1499 if (len < 0) 1500 return len; 1501 1502 i += len; 1503 pkt_dev->lflow = value; 1504 sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); 1505 return count; 1506 } 1507 1508 if (!strcmp(name, "queue_map_min")) { 1509 len = num_arg(&user_buffer[i], 5, &value); 1510 if (len < 0) 1511 return len; 1512 1513 i += len; 1514 pkt_dev->queue_map_min = value; 1515 sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min); 1516 return count; 1517 } 1518 1519 if (!strcmp(name, "queue_map_max")) { 1520 len = num_arg(&user_buffer[i], 5, &value); 1521 if (len < 0) 1522 return len; 1523 1524 i += len; 1525 pkt_dev->queue_map_max = value; 1526 sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max); 1527 return count; 1528 } 1529 1530 if (!strcmp(name, "mpls")) 
{ 1531 unsigned int n, cnt; 1532 1533 len = get_labels(&user_buffer[i], pkt_dev); 1534 if (len < 0) 1535 return len; 1536 i += len; 1537 cnt = sprintf(pg_result, "OK: mpls="); 1538 for (n = 0; n < pkt_dev->nr_labels; n++) 1539 cnt += sprintf(pg_result + cnt, 1540 "%08x%s", ntohl(pkt_dev->labels[n]), 1541 n == pkt_dev->nr_labels-1 ? "" : ","); 1542 1543 if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) { 1544 pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */ 1545 pkt_dev->svlan_id = 0xffff; 1546 1547 if (debug) 1548 pr_debug("VLAN/SVLAN auto turned off\n"); 1549 } 1550 return count; 1551 } 1552 1553 if (!strcmp(name, "vlan_id")) { 1554 len = num_arg(&user_buffer[i], 4, &value); 1555 if (len < 0) 1556 return len; 1557 1558 i += len; 1559 if (value <= 4095) { 1560 pkt_dev->vlan_id = value; /* turn on VLAN */ 1561 1562 if (debug) 1563 pr_debug("VLAN turned on\n"); 1564 1565 if (debug && pkt_dev->nr_labels) 1566 pr_debug("MPLS auto turned off\n"); 1567 1568 pkt_dev->nr_labels = 0; /* turn off MPLS */ 1569 sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id); 1570 } else { 1571 pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */ 1572 pkt_dev->svlan_id = 0xffff; 1573 1574 if (debug) 1575 pr_debug("VLAN/SVLAN turned off\n"); 1576 } 1577 return count; 1578 } 1579 1580 if (!strcmp(name, "vlan_p")) { 1581 len = num_arg(&user_buffer[i], 1, &value); 1582 if (len < 0) 1583 return len; 1584 1585 i += len; 1586 if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) { 1587 pkt_dev->vlan_p = value; 1588 sprintf(pg_result, "OK: vlan_p=%u", pkt_dev->vlan_p); 1589 } else { 1590 sprintf(pg_result, "ERROR: vlan_p must be 0-7"); 1591 } 1592 return count; 1593 } 1594 1595 if (!strcmp(name, "vlan_cfi")) { 1596 len = num_arg(&user_buffer[i], 1, &value); 1597 if (len < 0) 1598 return len; 1599 1600 i += len; 1601 if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) { 1602 pkt_dev->vlan_cfi = value; 1603 sprintf(pg_result, "OK: vlan_cfi=%u", pkt_dev->vlan_cfi); 1604 } else { 1605 
sprintf(pg_result, "ERROR: vlan_cfi must be 0-1"); 1606 } 1607 return count; 1608 } 1609 1610 if (!strcmp(name, "svlan_id")) { 1611 len = num_arg(&user_buffer[i], 4, &value); 1612 if (len < 0) 1613 return len; 1614 1615 i += len; 1616 if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) { 1617 pkt_dev->svlan_id = value; /* turn on SVLAN */ 1618 1619 if (debug) 1620 pr_debug("SVLAN turned on\n"); 1621 1622 if (debug && pkt_dev->nr_labels) 1623 pr_debug("MPLS auto turned off\n"); 1624 1625 pkt_dev->nr_labels = 0; /* turn off MPLS */ 1626 sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id); 1627 } else { 1628 pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */ 1629 pkt_dev->svlan_id = 0xffff; 1630 1631 if (debug) 1632 pr_debug("VLAN/SVLAN turned off\n"); 1633 } 1634 return count; 1635 } 1636 1637 if (!strcmp(name, "svlan_p")) { 1638 len = num_arg(&user_buffer[i], 1, &value); 1639 if (len < 0) 1640 return len; 1641 1642 i += len; 1643 if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) { 1644 pkt_dev->svlan_p = value; 1645 sprintf(pg_result, "OK: svlan_p=%u", pkt_dev->svlan_p); 1646 } else { 1647 sprintf(pg_result, "ERROR: svlan_p must be 0-7"); 1648 } 1649 return count; 1650 } 1651 1652 if (!strcmp(name, "svlan_cfi")) { 1653 len = num_arg(&user_buffer[i], 1, &value); 1654 if (len < 0) 1655 return len; 1656 1657 i += len; 1658 if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) { 1659 pkt_dev->svlan_cfi = value; 1660 sprintf(pg_result, "OK: svlan_cfi=%u", pkt_dev->svlan_cfi); 1661 } else { 1662 sprintf(pg_result, "ERROR: svlan_cfi must be 0-1"); 1663 } 1664 return count; 1665 } 1666 1667 if (!strcmp(name, "tos")) { 1668 __u32 tmp_value = 0; 1669 len = hex32_arg(&user_buffer[i], 2, &tmp_value); 1670 if (len < 0) 1671 return len; 1672 1673 i += len; 1674 if (len == 2) { 1675 pkt_dev->tos = tmp_value; 1676 sprintf(pg_result, "OK: tos=0x%02x", pkt_dev->tos); 1677 } else { 1678 sprintf(pg_result, "ERROR: tos must be 00-ff"); 1679 } 1680 return count; 1681 } 1682 1683 
	/* Tail of pktgen_if_write(): remaining per-device commands. */
	if (!strcmp(name, "traffic_class")) {
		__u32 tmp_value = 0;
		/* IPv6 traffic class: exactly two hex digits required */
		len = hex32_arg(&user_buffer[i], 2, &tmp_value);
		if (len < 0)
			return len;

		i += len;
		if (len == 2) {
			pkt_dev->traffic_class = tmp_value;
			sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class);
		} else {
			sprintf(pg_result, "ERROR: traffic_class must be 00-ff");
		}
		return count;
	}

	if (!strcmp(name, "skb_priority")) {
		len = num_arg(&user_buffer[i], 9, &value);
		if (len < 0)
			return len;

		i += len;
		/* skb->priority to assign to generated packets */
		pkt_dev->skb_priority = value;
		sprintf(pg_result, "OK: skb_priority=%i",
			pkt_dev->skb_priority);
		return count;
	}

	/* Unrecognized parameter: report via the result buffer and fail. */
	sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
	return -EINVAL;
}

/* proc open for a per-device entry; PDE_DATA carries the pktgen_dev. */
static int pktgen_if_open(struct inode *inode, struct file *file)
{
	return single_open(file, pktgen_if_show, PDE_DATA(inode));
}

/* File operations for /proc/net/pktgen/<device>. */
static const struct file_operations pktgen_if_fops = {
	.owner   = THIS_MODULE,
	.open    = pktgen_if_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = pktgen_if_write,
	.release = single_release,
};

/*
 * Show a thread's state: which attached devices are running/stopped,
 * plus the result string of the last command. Holds if_lock while
 * walking the thread's if_list.
 */
static int pktgen_thread_show(struct seq_file *seq, void *v)
{
	struct pktgen_thread *t = seq->private;
	const struct pktgen_dev *pkt_dev;

	BUG_ON(!t);

	seq_printf(seq, "Running: ");

	if_lock(t);
	list_for_each_entry(pkt_dev, &t->if_list, list)
		if (pkt_dev->running)
			seq_printf(seq, "%s ", pkt_dev->odevname);

	seq_printf(seq, "\nStopped: ");

	list_for_each_entry(pkt_dev, &t->if_list, list)
		if (!pkt_dev->running)
			seq_printf(seq, "%s ", pkt_dev->odevname);

	if (t->result[0])
		seq_printf(seq, "\nResult: %s\n", t->result);
	else
		seq_printf(seq, "\nResult: NA\n");

	if_unlock(t);

	return 0;
}

/*
 * Parse commands written to a per-thread proc file. Accepted commands
 * (visible below): "add_device <ifname>", "rem_device_all", and the
 * obsolete "max_before_softirq". The outcome is echoed into t->result
 * so a subsequent read reports it.
 */
static ssize_t pktgen_thread_write(struct file *file,
				   const char __user * user_buffer,
				   size_t count, loff_t * offset)
{
	struct seq_file *seq = file->private_data;
	struct pktgen_thread *t = seq->private;
	int i, max, len, ret;
	char name[40];
	char *pg_result;

	if (count < 1) {
		//      sprintf(pg_result, "Wrong command format");
		return -EINVAL;
	}

	max = count;
	/* Skip leading whitespace before the command name. */
	len = count_trail_chars(user_buffer, max);
	if (len < 0)
		return len;

	i = len;

	/* Read variable name */

	len = strn_len(&user_buffer[i], sizeof(name) - 1);
	if (len < 0)
		return len;

	memset(name, 0, sizeof(name));
	if (copy_from_user(name, &user_buffer[i], len))
		return -EFAULT;
	i += len;

	max = count - i;
	/* Skip whitespace between command name and its argument. */
	len = count_trail_chars(&user_buffer[i], max);
	if (len < 0)
		return len;

	i += len;

	if (debug)
		pr_debug("t=%s, count=%lu\n", name, (unsigned long)count);

	if (!t) {
		pr_err("ERROR: No thread\n");
		ret = -EINVAL;
		goto out;
	}

	pg_result = &(t->result[0]);

	if (!strcmp(name, "add_device")) {
		char f[32];
		memset(f, 0, 32);
		len = strn_len(&user_buffer[i], sizeof(f) - 1);
		if (len < 0) {
			ret = len;
			goto out;
		}
		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;
		i += len;
		/* Serialize device add against other control operations. */
		mutex_lock(&pktgen_thread_lock);
		ret = pktgen_add_device(t, f);
		mutex_unlock(&pktgen_thread_lock);
		if (!ret) {
			ret = count;
			sprintf(pg_result, "OK: add_device=%s", f);
		} else
			sprintf(pg_result, "ERROR: can not add device %s", f);
		goto out;
	}

	if (!strcmp(name, "rem_device_all")) {
		mutex_lock(&pktgen_thread_lock);
		/* Request removal asynchronously via the thread's control word. */
		t->control |= T_REMDEVALL;
		mutex_unlock(&pktgen_thread_lock);
		schedule_timeout_interruptible(msecs_to_jiffies(125));	/* Propagate thread->control */
		ret = count;
		sprintf(pg_result, "OK: rem_device_all");
		goto out;
	}

	if (!strcmp(name, "max_before_softirq")) {
		sprintf(pg_result, "OK: Note! max_before_softirq is obsoleted -- Do not use");
		ret = count;
		goto out;
	}

	ret = -EINVAL;
out:
	return ret;
}

/* proc open for a per-thread entry; PDE_DATA carries the pktgen_thread. */
static int pktgen_thread_open(struct inode *inode, struct file *file)
{
	return single_open(file, pktgen_thread_show, PDE_DATA(inode));
}

/* File operations for the per-thread control files. */
static const struct file_operations pktgen_thread_fops = {
	.owner   = THIS_MODULE,
	.open    = pktgen_thread_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = pktgen_thread_write,
	.release = single_release,
};

/* Think find or remove for NN */
/*
 * Search every thread's if_list for @ifname. With remove != FIND the
 * match may be inexact and the found device is marked for removal
 * (removal_mark + T_REMDEV) under if_lock; the thread itself performs
 * the actual teardown. Returns the matched device or NULL.
 */
static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn,
					      const char *ifname, int remove)
{
	struct pktgen_thread *t;
	struct pktgen_dev *pkt_dev = NULL;
	bool exact = (remove == FIND);

	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
		pkt_dev = pktgen_find_dev(t, ifname, exact);
		if (pkt_dev) {
			if (remove) {
				if_lock(t);
				pkt_dev->removal_mark = 1;
				t->control |= T_REMDEV;
				if_unlock(t);
			}
			break;
		}
	}
	return pkt_dev;
}

/*
 * mark a device for removal
 *
 * Repeatedly marks @ifname for removal and waits (125 ms per try, at
 * most 10 tries) for the owning thread to actually release it.
 */
static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
{
	struct pktgen_dev *pkt_dev = NULL;
	const int max_tries = 10, msec_per_try = 125;
	int i = 0;

	mutex_lock(&pktgen_thread_lock);
	pr_debug("%s: marking %s for removal\n", __func__, ifname);

	while (1) {

		pkt_dev = __pktgen_NN_threads(pn, ifname, REMOVE);
		if (pkt_dev == NULL)
			break;	/* success */

		/* Drop the lock so the owning thread can act on the mark. */
		mutex_unlock(&pktgen_thread_lock);
		pr_debug("%s: waiting for %s to disappear....\n",
			 __func__, ifname);
		schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
		mutex_lock(&pktgen_thread_lock);

		if (++i >=
max_tries) {
			pr_err("%s: timed out after waiting %d msec for device %s to be removed\n",
			       __func__, msec_per_try * i, ifname);
			break;
		}

	}

	mutex_unlock(&pktgen_thread_lock);
}

/*
 * A watched device was renamed (NETDEV_CHANGENAME): re-create its
 * /proc/net/pktgen entry under the new name.
 */
static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *dev)
{
	struct pktgen_thread *t;

	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
		struct pktgen_dev *pkt_dev;

		list_for_each_entry(pkt_dev, &t->if_list, list) {
			if (pkt_dev->odev != dev)
				continue;

			proc_remove(pkt_dev->entry);

			pkt_dev->entry = proc_create_data(dev->name, 0600,
							  pn->proc_dir,
							  &pktgen_if_fops,
							  pkt_dev);
			if (!pkt_dev->entry)
				pr_err("can't move proc entry for '%s'\n",
				       dev->name);
			break;
		}
	}
}

/*
 * netdevice notifier: track renames and unregistration of devices
 * pktgen is attached to. Ignored once the netns is exiting.
 */
static int pktgen_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id);

	if (pn->pktgen_exiting)
		return NOTIFY_DONE;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGENAME:
		pktgen_change_name(pn, dev);
		break;

	case NETDEV_UNREGISTER:
		pktgen_mark_device(pn, dev->name);
		break;
	}

	return NOTIFY_DONE;
}

/*
 * Look up a net_device by pktgen interface spec. The spec may carry an
 * "@suffix" (e.g. "eth0@0"); only the part before '@' (at most
 * IFNAMSIZ chars) is used as the device name. Caller owns the
 * reference taken by dev_get_by_name().
 */
static struct net_device *pktgen_dev_get_by_name(const struct pktgen_net *pn,
						 struct pktgen_dev *pkt_dev,
						 const char *ifname)
{
	char b[IFNAMSIZ+5];
	int i;

	for (i = 0; ifname[i] != '@'; i++) {
		if (i == IFNAMSIZ)
			break;

		b[i] = ifname[i];
	}
	b[i] = 0;

	return dev_get_by_name(pn->net, b);
}


/* Associate pktgen_dev with a device. */

static int pktgen_setup_dev(const struct pktgen_net *pn,
			    struct pktgen_dev *pkt_dev, const char *ifname)
{
	struct net_device *odev;
	int err;

	/* Clean old setups */
	if (pkt_dev->odev) {
		dev_put(pkt_dev->odev);
		pkt_dev->odev = NULL;
	}

	odev = pktgen_dev_get_by_name(pn, pkt_dev, ifname);
	if (!odev) {
		pr_err("no such netdevice: \"%s\"\n", ifname);
		return -ENODEV;
	}

	if (odev->type != ARPHRD_ETHER) {
		pr_err("not an ethernet device: \"%s\"\n", ifname);
		err = -EINVAL;
	} else if (!netif_running(odev)) {
		pr_err("device is down: \"%s\"\n", ifname);
		err = -ENETDOWN;
	} else {
		/* Success: keep the device reference taken above. */
		pkt_dev->odev = odev;
		return 0;
	}

	dev_put(odev);
	return err;
}

/* Read pkt_dev from the interface and set up internal pktgen_dev
 * structure to have the right information to create/send packets
 */
static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
{
	int ntxq;

	if (!pkt_dev->odev) {
		pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n");
		sprintf(pkt_dev->result,
			"ERROR: pkt_dev->odev == NULL in setup_inject.\n");
		return;
	}

	/* make sure that we don't pick a non-existing transmit queue */
	ntxq = pkt_dev->odev->real_num_tx_queues;

	if (ntxq <= pkt_dev->queue_map_min) {
		pr_warning("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
			   pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
			   pkt_dev->odevname);
		pkt_dev->queue_map_min = (ntxq ?: 1) - 1;
	}
	if (pkt_dev->queue_map_max >= ntxq) {
		pr_warning("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
			   pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
			   pkt_dev->odevname);
		pkt_dev->queue_map_max = (ntxq ?: 1) - 1;
	}

	/* Default to the interface's mac if not explicitly set. */

	if (is_zero_ether_addr(pkt_dev->src_mac))
		ether_addr_copy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr);

	/* Set up Dest MAC */
	ether_addr_copy(&(pkt_dev->hh[0]), pkt_dev->dst_mac);

	if (pkt_dev->flags & F_IPV6) {
		int i, set = 0, err = 1;
		struct inet6_dev *idev;

		/* 14 = ethernet header length */
		if (pkt_dev->min_pkt_size == 0) {
			pkt_dev->min_pkt_size = 14 + sizeof(struct ipv6hdr)
						+ sizeof(struct udphdr)
						+ sizeof(struct pktgen_hdr)
						+ pkt_dev->pkt_overhead;
		}

		/* Is any byte of the configured source address non-zero? */
		for (i = 0; i < IN6_ADDR_HSIZE; i++)
			if (pkt_dev->cur_in6_saddr.s6_addr[i]) {
				set = 1;
				break;
			}

		if (!set) {

			/*
			 * Use linklevel address if unconfigured.
			 *
			 * use ipv6_get_lladdr if/when it's get exported
			 */

			rcu_read_lock();
			idev = __in6_dev_get(pkt_dev->odev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if ((ifp->scope & IFA_LINK) &&
					    !(ifp->flags & IFA_F_TENTATIVE)) {
						pkt_dev->cur_in6_saddr = ifp->addr;
						err = 0;
						break;
					}
				}
				read_unlock_bh(&idev->lock);
			}
			rcu_read_unlock();
			if (err)
				pr_err("ERROR: IPv6 link address not available\n");
		}
	} else {
		/* IPv4: 14 = ethernet header length */
		if (pkt_dev->min_pkt_size == 0) {
			pkt_dev->min_pkt_size = 14 + sizeof(struct iphdr)
						+ sizeof(struct udphdr)
						+ sizeof(struct pktgen_hdr)
						+ pkt_dev->pkt_overhead;
		}

		pkt_dev->saddr_min = 0;
		pkt_dev->saddr_max = 0;
		if (strlen(pkt_dev->src_min) == 0) {

			/* No source configured: use the device's first address. */
			struct in_device *in_dev;

			rcu_read_lock();
			in_dev = __in_dev_get_rcu(pkt_dev->odev);
			if (in_dev) {
				if (in_dev->ifa_list) {
					pkt_dev->saddr_min =
					    in_dev->ifa_list->ifa_address;
					pkt_dev->saddr_max = pkt_dev->saddr_min;
				}
			}
			rcu_read_unlock();
		} else {
			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
		}

		pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
		pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
	}
	/* Initialize current values. */
	pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
	if (pkt_dev->min_pkt_size > pkt_dev->max_pkt_size)
		pkt_dev->max_pkt_size = pkt_dev->min_pkt_size;

	pkt_dev->cur_dst_mac_offset = 0;
	pkt_dev->cur_src_mac_offset = 0;
	pkt_dev->cur_saddr = pkt_dev->saddr_min;
	pkt_dev->cur_daddr = pkt_dev->daddr_min;
	pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
	pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
	pkt_dev->nflows = 0;
}


/*
 * Wait until @spin_until and account the time as idle. Delays shorter
 * than 100 us busy-poll ktime_get(); longer ones sleep on an hrtimer
 * (pattern follows do_nanosleep). Also schedules the next tx time.
 */
static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
{
	ktime_t start_time, end_time;
	s64 remaining;
	struct hrtimer_sleeper t;

	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_set_expires(&t.timer, spin_until);

	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
	if (remaining <= 0) {
		pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
		return;
	}

	start_time = ktime_get();
	if (remaining < 100000) {
		/* for small delays (<100us), just loop until limit is reached */
		do {
			end_time = ktime_get();
		} while (ktime_compare(end_time, spin_until) < 0);
	} else {
		/* see do_nanosleep */
		hrtimer_init_sleeper(&t, current);
		do {
			set_current_state(TASK_INTERRUPTIBLE);
			hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
			if (!hrtimer_active(&t.timer))
				t.task = NULL;

			if (likely(t.task))
				schedule();

			hrtimer_cancel(&t.timer);
		} while (t.task && pkt_dev->running && !signal_pending(current));
		__set_current_state(TASK_RUNNING);
		end_time = ktime_get();
	}

	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
}

/* Recompute per-packet overhead: MPLS labels plus VLAN/SVLAN tags. */
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
{
	pkt_dev->pkt_overhead = 0;
	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
}

/* Has flow @flow already been initialized (F_INIT set)? */
static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow)
{
	return !!(pkt_dev->flows[flow].flags & F_INIT);
}

/*
 * Pick the current flow index. F_FLOW_SEQ walks flows sequentially,
 * advancing when a flow has sent lflow packets; otherwise a flow is
 * chosen at random, resetting it if it exceeded lflow.
 */
static inline int f_pick(struct pktgen_dev *pkt_dev)
{
	int flow = pkt_dev->curfl;

	if (pkt_dev->flags & F_FLOW_SEQ) {
		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
			/* reset time */
			pkt_dev->flows[flow].count = 0;
			pkt_dev->flows[flow].flags = 0;
			pkt_dev->curfl += 1;
			if (pkt_dev->curfl >= pkt_dev->cflows)
				pkt_dev->curfl = 0; /*reset */
		}
	} else {
		flow = prandom_u32() % pkt_dev->cflows;
		pkt_dev->curfl = flow;

		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
			pkt_dev->flows[flow].count = 0;
			pkt_dev->flows[flow].flags = 0;
		}
	}

	return pkt_dev->curfl;
}


#ifdef CONFIG_XFRM
/* If there was already an IPSEC SA, we keep it as is, else
 * we go look for it ...
 */
#define DUMMY_MARK 0
static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
{
	struct xfrm_state *x = pkt_dev->flows[flow].x;
	struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);
	if (!x) {

		if (pkt_dev->spi) {
			/* We need as quick as possible to find the right SA
			 * Searching with minimum criteria to achieve this.
2257 */ 2258 x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET); 2259 } else { 2260 /* slow path: we dont already have xfrm_state */ 2261 x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 2262 (xfrm_address_t *)&pkt_dev->cur_daddr, 2263 (xfrm_address_t *)&pkt_dev->cur_saddr, 2264 AF_INET, 2265 pkt_dev->ipsmode, 2266 pkt_dev->ipsproto, 0); 2267 } 2268 if (x) { 2269 pkt_dev->flows[flow].x = x; 2270 set_pkt_overhead(pkt_dev); 2271 pkt_dev->pkt_overhead += x->props.header_len; 2272 } 2273 2274 } 2275 } 2276 #endif 2277 static void set_cur_queue_map(struct pktgen_dev *pkt_dev) 2278 { 2279 2280 if (pkt_dev->flags & F_QUEUE_MAP_CPU) 2281 pkt_dev->cur_queue_map = smp_processor_id(); 2282 2283 else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) { 2284 __u16 t; 2285 if (pkt_dev->flags & F_QUEUE_MAP_RND) { 2286 t = prandom_u32() % 2287 (pkt_dev->queue_map_max - 2288 pkt_dev->queue_map_min + 1) 2289 + pkt_dev->queue_map_min; 2290 } else { 2291 t = pkt_dev->cur_queue_map + 1; 2292 if (t > pkt_dev->queue_map_max) 2293 t = pkt_dev->queue_map_min; 2294 } 2295 pkt_dev->cur_queue_map = t; 2296 } 2297 pkt_dev->cur_queue_map = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues; 2298 } 2299 2300 /* Increment/randomize headers according to flags and current values 2301 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst 2302 */ 2303 static void mod_cur_headers(struct pktgen_dev *pkt_dev) 2304 { 2305 __u32 imn; 2306 __u32 imx; 2307 int flow = 0; 2308 2309 if (pkt_dev->cflows) 2310 flow = f_pick(pkt_dev); 2311 2312 /* Deal with source MAC */ 2313 if (pkt_dev->src_mac_count > 1) { 2314 __u32 mc; 2315 __u32 tmp; 2316 2317 if (pkt_dev->flags & F_MACSRC_RND) 2318 mc = prandom_u32() % pkt_dev->src_mac_count; 2319 else { 2320 mc = pkt_dev->cur_src_mac_offset++; 2321 if (pkt_dev->cur_src_mac_offset >= 2322 pkt_dev->src_mac_count) 2323 pkt_dev->cur_src_mac_offset = 0; 2324 } 2325 2326 tmp = pkt_dev->src_mac[5] + (mc & 0xFF); 2327 pkt_dev->hh[11] = tmp; 2328 tmp = 
(pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 2329 pkt_dev->hh[10] = tmp; 2330 tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 2331 pkt_dev->hh[9] = tmp; 2332 tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 2333 pkt_dev->hh[8] = tmp; 2334 tmp = (pkt_dev->src_mac[1] + (tmp >> 8)); 2335 pkt_dev->hh[7] = tmp; 2336 } 2337 2338 /* Deal with Destination MAC */ 2339 if (pkt_dev->dst_mac_count > 1) { 2340 __u32 mc; 2341 __u32 tmp; 2342 2343 if (pkt_dev->flags & F_MACDST_RND) 2344 mc = prandom_u32() % pkt_dev->dst_mac_count; 2345 2346 else { 2347 mc = pkt_dev->cur_dst_mac_offset++; 2348 if (pkt_dev->cur_dst_mac_offset >= 2349 pkt_dev->dst_mac_count) { 2350 pkt_dev->cur_dst_mac_offset = 0; 2351 } 2352 } 2353 2354 tmp = pkt_dev->dst_mac[5] + (mc & 0xFF); 2355 pkt_dev->hh[5] = tmp; 2356 tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 2357 pkt_dev->hh[4] = tmp; 2358 tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 2359 pkt_dev->hh[3] = tmp; 2360 tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 2361 pkt_dev->hh[2] = tmp; 2362 tmp = (pkt_dev->dst_mac[1] + (tmp >> 8)); 2363 pkt_dev->hh[1] = tmp; 2364 } 2365 2366 if (pkt_dev->flags & F_MPLS_RND) { 2367 unsigned int i; 2368 for (i = 0; i < pkt_dev->nr_labels; i++) 2369 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) 2370 pkt_dev->labels[i] = MPLS_STACK_BOTTOM | 2371 ((__force __be32)prandom_u32() & 2372 htonl(0x000fffff)); 2373 } 2374 2375 if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) { 2376 pkt_dev->vlan_id = prandom_u32() & (4096 - 1); 2377 } 2378 2379 if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) { 2380 pkt_dev->svlan_id = prandom_u32() & (4096 - 1); 2381 } 2382 2383 if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) { 2384 if (pkt_dev->flags & F_UDPSRC_RND) 2385 pkt_dev->cur_udp_src = prandom_u32() % 2386 (pkt_dev->udp_src_max - pkt_dev->udp_src_min) 2387 + pkt_dev->udp_src_min; 2388 2389 else { 2390 
pkt_dev->cur_udp_src++; 2391 if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max) 2392 pkt_dev->cur_udp_src = pkt_dev->udp_src_min; 2393 } 2394 } 2395 2396 if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) { 2397 if (pkt_dev->flags & F_UDPDST_RND) { 2398 pkt_dev->cur_udp_dst = prandom_u32() % 2399 (pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) 2400 + pkt_dev->udp_dst_min; 2401 } else { 2402 pkt_dev->cur_udp_dst++; 2403 if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max) 2404 pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min; 2405 } 2406 } 2407 2408 if (!(pkt_dev->flags & F_IPV6)) { 2409 2410 imn = ntohl(pkt_dev->saddr_min); 2411 imx = ntohl(pkt_dev->saddr_max); 2412 if (imn < imx) { 2413 __u32 t; 2414 if (pkt_dev->flags & F_IPSRC_RND) 2415 t = prandom_u32() % (imx - imn) + imn; 2416 else { 2417 t = ntohl(pkt_dev->cur_saddr); 2418 t++; 2419 if (t > imx) 2420 t = imn; 2421 2422 } 2423 pkt_dev->cur_saddr = htonl(t); 2424 } 2425 2426 if (pkt_dev->cflows && f_seen(pkt_dev, flow)) { 2427 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; 2428 } else { 2429 imn = ntohl(pkt_dev->daddr_min); 2430 imx = ntohl(pkt_dev->daddr_max); 2431 if (imn < imx) { 2432 __u32 t; 2433 __be32 s; 2434 if (pkt_dev->flags & F_IPDST_RND) { 2435 2436 do { 2437 t = prandom_u32() % 2438 (imx - imn) + imn; 2439 s = htonl(t); 2440 } while (ipv4_is_loopback(s) || 2441 ipv4_is_multicast(s) || 2442 ipv4_is_lbcast(s) || 2443 ipv4_is_zeronet(s) || 2444 ipv4_is_local_multicast(s)); 2445 pkt_dev->cur_daddr = s; 2446 } else { 2447 t = ntohl(pkt_dev->cur_daddr); 2448 t++; 2449 if (t > imx) { 2450 t = imn; 2451 } 2452 pkt_dev->cur_daddr = htonl(t); 2453 } 2454 } 2455 if (pkt_dev->cflows) { 2456 pkt_dev->flows[flow].flags |= F_INIT; 2457 pkt_dev->flows[flow].cur_daddr = 2458 pkt_dev->cur_daddr; 2459 #ifdef CONFIG_XFRM 2460 if (pkt_dev->flags & F_IPSEC_ON) 2461 get_ipsec_sa(pkt_dev, flow); 2462 #endif 2463 pkt_dev->nflows++; 2464 } 2465 } 2466 } else { /* IPV6 * */ 2467 2468 if (!ipv6_addr_any(&pkt_dev->min_in6_daddr)) { 
2469 int i; 2470 2471 /* Only random destinations yet */ 2472 2473 for (i = 0; i < 4; i++) { 2474 pkt_dev->cur_in6_daddr.s6_addr32[i] = 2475 (((__force __be32)prandom_u32() | 2476 pkt_dev->min_in6_daddr.s6_addr32[i]) & 2477 pkt_dev->max_in6_daddr.s6_addr32[i]); 2478 } 2479 } 2480 } 2481 2482 if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) { 2483 __u32 t; 2484 if (pkt_dev->flags & F_TXSIZE_RND) { 2485 t = prandom_u32() % 2486 (pkt_dev->max_pkt_size - pkt_dev->min_pkt_size) 2487 + pkt_dev->min_pkt_size; 2488 } else { 2489 t = pkt_dev->cur_pkt_size + 1; 2490 if (t > pkt_dev->max_pkt_size) 2491 t = pkt_dev->min_pkt_size; 2492 } 2493 pkt_dev->cur_pkt_size = t; 2494 } 2495 2496 set_cur_queue_map(pkt_dev); 2497 2498 pkt_dev->flows[flow].count++; 2499 } 2500 2501 2502 #ifdef CONFIG_XFRM 2503 static u32 pktgen_dst_metrics[RTAX_MAX + 1] = { 2504 2505 [RTAX_HOPLIMIT] = 0x5, /* Set a static hoplimit */ 2506 }; 2507 2508 static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev) 2509 { 2510 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x; 2511 int err = 0; 2512 struct net *net = dev_net(pkt_dev->odev); 2513 2514 if (!x) 2515 return 0; 2516 /* XXX: we dont support tunnel mode for now until 2517 * we resolve the dst issue */ 2518 if ((x->props.mode != XFRM_MODE_TRANSPORT) && (pkt_dev->spi == 0)) 2519 return 0; 2520 2521 /* But when user specify an valid SPI, transformation 2522 * supports both transport/tunnel mode + ESP/AH type. 
2523 */ 2524 if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0)) 2525 skb->_skb_refdst = (unsigned long)&pkt_dev->dst | SKB_DST_NOREF; 2526 2527 rcu_read_lock_bh(); 2528 err = x->outer_mode->output(x, skb); 2529 rcu_read_unlock_bh(); 2530 if (err) { 2531 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR); 2532 goto error; 2533 } 2534 err = x->type->output(x, skb); 2535 if (err) { 2536 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR); 2537 goto error; 2538 } 2539 spin_lock_bh(&x->lock); 2540 x->curlft.bytes += skb->len; 2541 x->curlft.packets++; 2542 spin_unlock_bh(&x->lock); 2543 error: 2544 return err; 2545 } 2546 2547 static void free_SAs(struct pktgen_dev *pkt_dev) 2548 { 2549 if (pkt_dev->cflows) { 2550 /* let go of the SAs if we have them */ 2551 int i; 2552 for (i = 0; i < pkt_dev->cflows; i++) { 2553 struct xfrm_state *x = pkt_dev->flows[i].x; 2554 if (x) { 2555 xfrm_state_put(x); 2556 pkt_dev->flows[i].x = NULL; 2557 } 2558 } 2559 } 2560 } 2561 2562 static int process_ipsec(struct pktgen_dev *pkt_dev, 2563 struct sk_buff *skb, __be16 protocol) 2564 { 2565 if (pkt_dev->flags & F_IPSEC_ON) { 2566 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x; 2567 int nhead = 0; 2568 if (x) { 2569 int ret; 2570 __u8 *eth; 2571 struct iphdr *iph; 2572 2573 nhead = x->props.header_len - skb_headroom(skb); 2574 if (nhead > 0) { 2575 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); 2576 if (ret < 0) { 2577 pr_err("Error expanding ipsec packet %d\n", 2578 ret); 2579 goto err; 2580 } 2581 } 2582 2583 /* ipsec is not expecting ll header */ 2584 skb_pull(skb, ETH_HLEN); 2585 ret = pktgen_output_ipsec(skb, pkt_dev); 2586 if (ret) { 2587 pr_err("Error creating ipsec packet %d\n", ret); 2588 goto err; 2589 } 2590 /* restore ll */ 2591 eth = (__u8 *) skb_push(skb, ETH_HLEN); 2592 memcpy(eth, pkt_dev->hh, 12); 2593 *(u16 *) ð[12] = protocol; 2594 2595 /* Update IPv4 header len as well as checksum value */ 2596 iph = ip_hdr(skb); 2597 iph->tot_len = 
htons(skb->len - ETH_HLEN); 2598 ip_send_check(iph); 2599 } 2600 } 2601 return 1; 2602 err: 2603 kfree_skb(skb); 2604 return 0; 2605 } 2606 #endif 2607 2608 static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2609 { 2610 unsigned int i; 2611 for (i = 0; i < pkt_dev->nr_labels; i++) 2612 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; 2613 2614 mpls--; 2615 *mpls |= MPLS_STACK_BOTTOM; 2616 } 2617 2618 static inline __be16 build_tci(unsigned int id, unsigned int cfi, 2619 unsigned int prio) 2620 { 2621 return htons(id | (cfi << 12) | (prio << 13)); 2622 } 2623 2624 static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb, 2625 int datalen) 2626 { 2627 struct timeval timestamp; 2628 struct pktgen_hdr *pgh; 2629 2630 pgh = (struct pktgen_hdr *)skb_put(skb, sizeof(*pgh)); 2631 datalen -= sizeof(*pgh); 2632 2633 if (pkt_dev->nfrags <= 0) { 2634 memset(skb_put(skb, datalen), 0, datalen); 2635 } else { 2636 int frags = pkt_dev->nfrags; 2637 int i, len; 2638 int frag_len; 2639 2640 2641 if (frags > MAX_SKB_FRAGS) 2642 frags = MAX_SKB_FRAGS; 2643 len = datalen - frags * PAGE_SIZE; 2644 if (len > 0) { 2645 memset(skb_put(skb, len), 0, len); 2646 datalen = frags * PAGE_SIZE; 2647 } 2648 2649 i = 0; 2650 frag_len = (datalen/frags) < PAGE_SIZE ? 2651 (datalen/frags) : PAGE_SIZE; 2652 while (datalen > 0) { 2653 if (unlikely(!pkt_dev->page)) { 2654 int node = numa_node_id(); 2655 2656 if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE)) 2657 node = pkt_dev->node; 2658 pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); 2659 if (!pkt_dev->page) 2660 break; 2661 } 2662 get_page(pkt_dev->page); 2663 skb_frag_set_page(skb, i, pkt_dev->page); 2664 skb_shinfo(skb)->frags[i].page_offset = 0; 2665 /*last fragment, fill rest of data*/ 2666 if (i == (frags - 1)) 2667 skb_frag_size_set(&skb_shinfo(skb)->frags[i], 2668 (datalen < PAGE_SIZE ? 
datalen : PAGE_SIZE)); 2669 else 2670 skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len); 2671 datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]); 2672 skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]); 2673 skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]); 2674 i++; 2675 skb_shinfo(skb)->nr_frags = i; 2676 } 2677 } 2678 2679 /* Stamp the time, and sequence number, 2680 * convert them to network byte order 2681 */ 2682 pgh->pgh_magic = htonl(PKTGEN_MAGIC); 2683 pgh->seq_num = htonl(pkt_dev->seq_num); 2684 2685 do_gettimeofday(×tamp); 2686 pgh->tv_sec = htonl(timestamp.tv_sec); 2687 pgh->tv_usec = htonl(timestamp.tv_usec); 2688 } 2689 2690 static struct sk_buff *pktgen_alloc_skb(struct net_device *dev, 2691 struct pktgen_dev *pkt_dev, 2692 unsigned int extralen) 2693 { 2694 struct sk_buff *skb = NULL; 2695 unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen + 2696 pkt_dev->pkt_overhead; 2697 2698 if (pkt_dev->flags & F_NODE) { 2699 int node = pkt_dev->node >= 0 ? 
pkt_dev->node : numa_node_id(); 2700 2701 skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node); 2702 if (likely(skb)) { 2703 skb_reserve(skb, NET_SKB_PAD); 2704 skb->dev = dev; 2705 } 2706 } else { 2707 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT); 2708 } 2709 2710 return skb; 2711 } 2712 2713 static struct sk_buff *fill_packet_ipv4(struct net_device *odev, 2714 struct pktgen_dev *pkt_dev) 2715 { 2716 struct sk_buff *skb = NULL; 2717 __u8 *eth; 2718 struct udphdr *udph; 2719 int datalen, iplen; 2720 struct iphdr *iph; 2721 __be16 protocol = htons(ETH_P_IP); 2722 __be32 *mpls; 2723 __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */ 2724 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ 2725 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ 2726 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ 2727 u16 queue_map; 2728 2729 if (pkt_dev->nr_labels) 2730 protocol = htons(ETH_P_MPLS_UC); 2731 2732 if (pkt_dev->vlan_id != 0xffff) 2733 protocol = htons(ETH_P_8021Q); 2734 2735 /* Update any of the values, used when we're incrementing various 2736 * fields. 
2737 */ 2738 mod_cur_headers(pkt_dev); 2739 queue_map = pkt_dev->cur_queue_map; 2740 2741 datalen = (odev->hard_header_len + 16) & ~0xf; 2742 2743 skb = pktgen_alloc_skb(odev, pkt_dev, datalen); 2744 if (!skb) { 2745 sprintf(pkt_dev->result, "No memory"); 2746 return NULL; 2747 } 2748 2749 prefetchw(skb->data); 2750 skb_reserve(skb, datalen); 2751 2752 /* Reserve for ethernet and IP header */ 2753 eth = (__u8 *) skb_push(skb, 14); 2754 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); 2755 if (pkt_dev->nr_labels) 2756 mpls_push(mpls, pkt_dev); 2757 2758 if (pkt_dev->vlan_id != 0xffff) { 2759 if (pkt_dev->svlan_id != 0xffff) { 2760 svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16)); 2761 *svlan_tci = build_tci(pkt_dev->svlan_id, 2762 pkt_dev->svlan_cfi, 2763 pkt_dev->svlan_p); 2764 svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16)); 2765 *svlan_encapsulated_proto = htons(ETH_P_8021Q); 2766 } 2767 vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16)); 2768 *vlan_tci = build_tci(pkt_dev->vlan_id, 2769 pkt_dev->vlan_cfi, 2770 pkt_dev->vlan_p); 2771 vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16)); 2772 *vlan_encapsulated_proto = htons(ETH_P_IP); 2773 } 2774 2775 skb_set_mac_header(skb, 0); 2776 skb_set_network_header(skb, skb->len); 2777 iph = (struct iphdr *) skb_put(skb, sizeof(struct iphdr)); 2778 2779 skb_set_transport_header(skb, skb->len); 2780 udph = (struct udphdr *) skb_put(skb, sizeof(struct udphdr)); 2781 skb_set_queue_mapping(skb, queue_map); 2782 skb->priority = pkt_dev->skb_priority; 2783 2784 memcpy(eth, pkt_dev->hh, 12); 2785 *(__be16 *) & eth[12] = protocol; 2786 2787 /* Eth + IPh + UDPh + mpls */ 2788 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - 2789 pkt_dev->pkt_overhead; 2790 if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) 2791 datalen = sizeof(struct pktgen_hdr); 2792 2793 udph->source = htons(pkt_dev->cur_udp_src); 2794 udph->dest = htons(pkt_dev->cur_udp_dst); 2795 udph->len = htons(datalen + 
8); /* DATA + udphdr */ 2796 udph->check = 0; 2797 2798 iph->ihl = 5; 2799 iph->version = 4; 2800 iph->ttl = 32; 2801 iph->tos = pkt_dev->tos; 2802 iph->protocol = IPPROTO_UDP; /* UDP */ 2803 iph->saddr = pkt_dev->cur_saddr; 2804 iph->daddr = pkt_dev->cur_daddr; 2805 iph->id = htons(pkt_dev->ip_id); 2806 pkt_dev->ip_id++; 2807 iph->frag_off = 0; 2808 iplen = 20 + 8 + datalen; 2809 iph->tot_len = htons(iplen); 2810 ip_send_check(iph); 2811 skb->protocol = protocol; 2812 skb->dev = odev; 2813 skb->pkt_type = PACKET_HOST; 2814 2815 if (!(pkt_dev->flags & F_UDPCSUM)) { 2816 skb->ip_summed = CHECKSUM_NONE; 2817 } else if (odev->features & NETIF_F_V4_CSUM) { 2818 skb->ip_summed = CHECKSUM_PARTIAL; 2819 skb->csum = 0; 2820 udp4_hwcsum(skb, udph->source, udph->dest); 2821 } else { 2822 __wsum csum = udp_csum(skb); 2823 2824 /* add protocol-dependent pseudo-header */ 2825 udph->check = csum_tcpudp_magic(udph->source, udph->dest, 2826 datalen + 8, IPPROTO_UDP, csum); 2827 2828 if (udph->check == 0) 2829 udph->check = CSUM_MANGLED_0; 2830 } 2831 2832 pktgen_finalize_skb(pkt_dev, skb, datalen); 2833 2834 #ifdef CONFIG_XFRM 2835 if (!process_ipsec(pkt_dev, skb, protocol)) 2836 return NULL; 2837 #endif 2838 2839 return skb; 2840 } 2841 2842 static struct sk_buff *fill_packet_ipv6(struct net_device *odev, 2843 struct pktgen_dev *pkt_dev) 2844 { 2845 struct sk_buff *skb = NULL; 2846 __u8 *eth; 2847 struct udphdr *udph; 2848 int datalen, udplen; 2849 struct ipv6hdr *iph; 2850 __be16 protocol = htons(ETH_P_IPV6); 2851 __be32 *mpls; 2852 __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */ 2853 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ 2854 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ 2855 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ 2856 u16 queue_map; 2857 2858 if (pkt_dev->nr_labels) 2859 protocol = htons(ETH_P_MPLS_UC); 2860 2861 if (pkt_dev->vlan_id != 
0xffff) 2862 protocol = htons(ETH_P_8021Q); 2863 2864 /* Update any of the values, used when we're incrementing various 2865 * fields. 2866 */ 2867 mod_cur_headers(pkt_dev); 2868 queue_map = pkt_dev->cur_queue_map; 2869 2870 skb = pktgen_alloc_skb(odev, pkt_dev, 16); 2871 if (!skb) { 2872 sprintf(pkt_dev->result, "No memory"); 2873 return NULL; 2874 } 2875 2876 prefetchw(skb->data); 2877 skb_reserve(skb, 16); 2878 2879 /* Reserve for ethernet and IP header */ 2880 eth = (__u8 *) skb_push(skb, 14); 2881 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); 2882 if (pkt_dev->nr_labels) 2883 mpls_push(mpls, pkt_dev); 2884 2885 if (pkt_dev->vlan_id != 0xffff) { 2886 if (pkt_dev->svlan_id != 0xffff) { 2887 svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16)); 2888 *svlan_tci = build_tci(pkt_dev->svlan_id, 2889 pkt_dev->svlan_cfi, 2890 pkt_dev->svlan_p); 2891 svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16)); 2892 *svlan_encapsulated_proto = htons(ETH_P_8021Q); 2893 } 2894 vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16)); 2895 *vlan_tci = build_tci(pkt_dev->vlan_id, 2896 pkt_dev->vlan_cfi, 2897 pkt_dev->vlan_p); 2898 vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16)); 2899 *vlan_encapsulated_proto = htons(ETH_P_IPV6); 2900 } 2901 2902 skb_set_mac_header(skb, 0); 2903 skb_set_network_header(skb, skb->len); 2904 iph = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr)); 2905 2906 skb_set_transport_header(skb, skb->len); 2907 udph = (struct udphdr *) skb_put(skb, sizeof(struct udphdr)); 2908 skb_set_queue_mapping(skb, queue_map); 2909 skb->priority = pkt_dev->skb_priority; 2910 2911 memcpy(eth, pkt_dev->hh, 12); 2912 *(__be16 *) ð[12] = protocol; 2913 2914 /* Eth + IPh + UDPh + mpls */ 2915 datalen = pkt_dev->cur_pkt_size - 14 - 2916 sizeof(struct ipv6hdr) - sizeof(struct udphdr) - 2917 pkt_dev->pkt_overhead; 2918 2919 if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) { 2920 datalen = sizeof(struct pktgen_hdr); 2921 
net_info_ratelimited("increased datalen to %d\n", datalen); 2922 } 2923 2924 udplen = datalen + sizeof(struct udphdr); 2925 udph->source = htons(pkt_dev->cur_udp_src); 2926 udph->dest = htons(pkt_dev->cur_udp_dst); 2927 udph->len = htons(udplen); 2928 udph->check = 0; 2929 2930 *(__be32 *) iph = htonl(0x60000000); /* Version + flow */ 2931 2932 if (pkt_dev->traffic_class) { 2933 /* Version + traffic class + flow (0) */ 2934 *(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); 2935 } 2936 2937 iph->hop_limit = 32; 2938 2939 iph->payload_len = htons(udplen); 2940 iph->nexthdr = IPPROTO_UDP; 2941 2942 iph->daddr = pkt_dev->cur_in6_daddr; 2943 iph->saddr = pkt_dev->cur_in6_saddr; 2944 2945 skb->protocol = protocol; 2946 skb->dev = odev; 2947 skb->pkt_type = PACKET_HOST; 2948 2949 if (!(pkt_dev->flags & F_UDPCSUM)) { 2950 skb->ip_summed = CHECKSUM_NONE; 2951 } else if (odev->features & NETIF_F_V6_CSUM) { 2952 skb->ip_summed = CHECKSUM_PARTIAL; 2953 skb->csum_start = skb_transport_header(skb) - skb->head; 2954 skb->csum_offset = offsetof(struct udphdr, check); 2955 udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0); 2956 } else { 2957 __wsum csum = udp_csum(skb); 2958 2959 /* add protocol-dependent pseudo-header */ 2960 udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum); 2961 2962 if (udph->check == 0) 2963 udph->check = CSUM_MANGLED_0; 2964 } 2965 2966 pktgen_finalize_skb(pkt_dev, skb, datalen); 2967 2968 return skb; 2969 } 2970 2971 static struct sk_buff *fill_packet(struct net_device *odev, 2972 struct pktgen_dev *pkt_dev) 2973 { 2974 if (pkt_dev->flags & F_IPV6) 2975 return fill_packet_ipv6(odev, pkt_dev); 2976 else 2977 return fill_packet_ipv4(odev, pkt_dev); 2978 } 2979 2980 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev) 2981 { 2982 pkt_dev->seq_num = 1; 2983 pkt_dev->idle_acc = 0; 2984 pkt_dev->sofar = 0; 2985 pkt_dev->tx_bytes = 0; 2986 pkt_dev->errors = 0; 2987 } 2988 
/* Set up structure for sending pkts, clear counters */

static void pktgen_run(struct pktgen_thread *t)
{
	struct pktgen_dev *pkt_dev;
	int started = 0;

	func_enter();

	if_lock(t);
	list_for_each_entry(pkt_dev, &t->if_list, list) {

		/*
		 * setup odev and create initial packet.
		 */
		pktgen_setup_inject(pkt_dev);

		if (pkt_dev->odev) {
			pktgen_clear_counters(pkt_dev);
			pkt_dev->running = 1;	/* Cranke yeself! */
			pkt_dev->skb = NULL;
			pkt_dev->started_at = pkt_dev->next_tx = ktime_get();

			set_pkt_overhead(pkt_dev);

			strcpy(pkt_dev->result, "Starting");
			started++;
		} else
			strcpy(pkt_dev->result, "Error starting");
	}
	if_unlock(t);
	/* at least one device started: clear any pending stop request */
	if (started)
		t->control &= ~(T_STOP);
}

/* Ask every pktgen thread (in this netns) to stop its devices. */
static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn)
{
	struct pktgen_thread *t;

	func_enter();

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list)
		t->control |= T_STOP;

	mutex_unlock(&pktgen_thread_lock);
}

/* Is any device on this thread currently transmitting?
 * Caller must hold the thread's if_lock.
 */
static int thread_is_running(const struct pktgen_thread *t)
{
	const struct pktgen_dev *pkt_dev;

	list_for_each_entry(pkt_dev, &t->if_list, list)
		if (pkt_dev->running)
			return 1;
	return 0;
}

/* Sleep-poll until the thread has no running devices.
 * Returns 1 on completion, 0 if interrupted by a signal.
 */
static int pktgen_wait_thread_run(struct pktgen_thread *t)
{
	if_lock(t);

	while (thread_is_running(t)) {

		/* drop the lock while sleeping so the worker can make
		 * progress
		 */
		if_unlock(t);

		msleep_interruptible(100);

		if (signal_pending(current))
			goto signal;
		if_lock(t);
	}
	if_unlock(t);
	return 1;
signal:
	return 0;
}

/* Wait for all threads to finish their runs; on signal, request a stop
 * on every thread.  Returns 0 if interrupted, 1 otherwise.
 */
static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
{
	struct pktgen_thread *t;
	int sig = 1;

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
		sig = pktgen_wait_thread_run(t);
		if (sig == 0)
			break;
	}

	if (sig == 0)
		list_for_each_entry(t, &pn->pktgen_threads, th_list)
			t->control |= (T_STOP);

	mutex_unlock(&pktgen_thread_lock);
	return sig;
}

/* Kick all threads into a run and wait for them to finish. */
static void pktgen_run_all_threads(struct pktgen_net *pn)
{
	struct pktgen_thread *t;

	func_enter();

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list)
		t->control |= (T_RUN);

	mutex_unlock(&pktgen_thread_lock);

	/* Propagate thread->control */
	schedule_timeout_interruptible(msecs_to_jiffies(125));

	pktgen_wait_all_threads_run(pn);
}

/* Request removal of every device on every thread, then wait. */
static void pktgen_reset_all_threads(struct pktgen_net *pn)
{
	struct pktgen_thread *t;

	func_enter();

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list)
		t->control |= (T_REMDEVALL);

	mutex_unlock(&pktgen_thread_lock);

	/* Propagate thread->control */
	schedule_timeout_interruptible(msecs_to_jiffies(125));

	pktgen_wait_all_threads_run(pn);
}

/* Format the run summary (elapsed/busy/idle time, pps, Mb/s, errors)
 * into pkt_dev->result for readback via /proc.
 */
static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
{
	__u64 bps, mbps, pps;
	char *p = pkt_dev->result;
	ktime_t elapsed = ktime_sub(pkt_dev->stopped_at,
				    pkt_dev->started_at);
	ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);

	p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
		     (unsigned long long)ktime_to_us(elapsed),
		     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
		     (unsigned long long)ktime_to_us(idle),
		     (unsigned long long)pkt_dev->sofar,
		     pkt_dev->cur_pkt_size, nr_frags);

	pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC,
			ktime_to_ns(elapsed));

	bps = pps * 8 * pkt_dev->cur_pkt_size;

	mbps = bps;
	do_div(mbps, 1000000);
	p += sprintf(p, "  %llupps %lluMb/sec (%llubps) errors: %llu",
		     (unsigned long long)pps,
		     (unsigned long long)mbps,
		     (unsigned long long)bps,
		     (unsigned long long)pkt_dev->errors);
}

/* Set stopped-at timer, remove from running list, do counters & statistics */
static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
{
	/* grab frag count before the skb is dropped below */
	int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;

	if (!pkt_dev->running) {
		pr_warning("interface: %s is already stopped\n",
			   pkt_dev->odevname);
		return -EINVAL;
	}

	kfree_skb(pkt_dev->skb);
	pkt_dev->skb = NULL;
	pkt_dev->stopped_at = ktime_get();
	pkt_dev->running = 0;

	show_results(pkt_dev, nr_frags);

	return 0;
}

/* Return the running device with the earliest next_tx time, or NULL
 * if none is running.
 */
static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
{
	struct pktgen_dev *pkt_dev, *best = NULL;

	if_lock(t);

	list_for_each_entry(pkt_dev, &t->if_list, list) {
		if (!pkt_dev->running)
			continue;
		if (best == NULL)
			best = pkt_dev;
		else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
			best = pkt_dev;
	}
	if_unlock(t);
	return best;
}

/* Stop every device on this thread. */
static void pktgen_stop(struct pktgen_thread *t)
{
	struct pktgen_dev *pkt_dev;

	func_enter();

	if_lock(t);

	list_for_each_entry(pkt_dev, &t->if_list, list) {
		pktgen_stop_device(pkt_dev);
	}

	if_unlock(t);
}

/*
 * one of our devices needs to be removed - find it
 * and remove it
 */
static void pktgen_rem_one_if(struct pktgen_thread *t)
{
	struct list_head *q, *n;
	struct pktgen_dev *cur;

	func_enter();

	if_lock(t);

	list_for_each_safe(q, n, &t->if_list) {
		cur = list_entry(q, struct pktgen_dev, list);

		/* only the device flagged for removal */
		if (!cur->removal_mark)
			continue;

		kfree_skb(cur->skb);
		cur->skb = NULL;

		pktgen_remove_device(t, cur);

		break;
	}

	if_unlock(t);
}

/* Remove every device from this thread, freeing their skbs. */
static void pktgen_rem_all_ifs(struct pktgen_thread *t)
{
	struct list_head *q, *n;
	struct pktgen_dev *cur;

	func_enter();

	/* Remove all devices, free mem */

	if_lock(t);

	list_for_each_safe(q, n, &t->if_list) {
		cur = list_entry(q, struct pktgen_dev, list);

		kfree_skb(cur->skb);
		cur->skb = NULL;

		pktgen_remove_device(t, cur);
	}

	if_unlock(t);
}

/* Tear down the thread's /proc entry when the thread exits. */
static void pktgen_rem_thread(struct pktgen_thread *t)
{
	/* Remove from the thread list */
	remove_proc_entry(t->tsk->comm, t->net->proc_dir);
}

/* Yield the CPU, accounting the time spent as idle time. */
static void pktgen_resched(struct pktgen_dev *pkt_dev)
{
	ktime_t idle_start = ktime_get();
	schedule();
	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
}

/* Busy/yield-wait until we hold the only reference to the current skb
 * (i.e. the driver has released its clone), accounting idle time.
 */
static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
{
	ktime_t idle_start = ktime_get();

	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
		if (signal_pending(current))
			break;

		if (need_resched())
			pktgen_resched(pkt_dev);
		else
			cpu_relax();
	}
	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
}

/* Transmit one packet on pkt_dev: (re)build the skb if the clone budget
 * is exhausted, honour the configured inter-packet delay, then hand the
 * skb to the driver under the tx-queue lock and update counters.
 */
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
	struct net_device *odev = pkt_dev->odev;
	netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *)
		= odev->netdev_ops->ndo_start_xmit;
	struct netdev_queue *txq;
	u16 queue_map;
	int ret;

	/* If device is offline, then don't send */
	if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
		pktgen_stop_device(pkt_dev);
		return;
	}

	/* This is max DELAY, this has special meaning of
	 * "never transmit"
	 */
	if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
		pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX);
		return;
	}

	/* If no skb or clone count exhausted then get new one */
	if (!pkt_dev->skb || (pkt_dev->last_ok &&
			      ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
		/* build a new pkt */
		kfree_skb(pkt_dev->skb);

		pkt_dev->skb = fill_packet(odev, pkt_dev);
		if (pkt_dev->skb == NULL) {
			pr_err("ERROR: couldn't allocate skb in fill_packet\n");
			schedule();
			pkt_dev->clone_count--;	/* back out increment, OOM */
			return;
		}
		pkt_dev->last_pkt_size = pkt_dev->skb->len;
		pkt_dev->allocated_skbs++;
		pkt_dev->clone_count = 0;	/* reset counter */
	}

	if (pkt_dev->delay && pkt_dev->last_ok)
		spin(pkt_dev, pkt_dev->next_tx);

	queue_map = skb_get_queue_mapping(pkt_dev->skb);
	txq = netdev_get_tx_queue(odev, queue_map);

	__netif_tx_lock_bh(txq);

	if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
		ret = NETDEV_TX_BUSY;
		pkt_dev->last_ok = 0;
		goto unlock;
	}
	/* extra reference: we keep the skb for possible re-use/clone */
	atomic_inc(&(pkt_dev->skb->users));
	ret = (*xmit)(pkt_dev->skb, odev);

	switch (ret) {
	case NETDEV_TX_OK:
		txq_trans_update(txq);
		pkt_dev->last_ok = 1;
		pkt_dev->sofar++;
		pkt_dev->seq_num++;
		pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
		break;
	case NET_XMIT_DROP:
	case NET_XMIT_CN:
	case NET_XMIT_POLICED:
		/* skb has been consumed */
		pkt_dev->errors++;
		break;
	default: /* Drivers are not supposed to return other values! */
		net_info_ratelimited("%s xmit error: %d\n",
				     pkt_dev->odevname, ret);
		pkt_dev->errors++;
		/* fallthru */
	case NETDEV_TX_LOCKED:
	case NETDEV_TX_BUSY:
		/* Retry it next time */
		atomic_dec(&(pkt_dev->skb->users));
		pkt_dev->last_ok = 0;
	}
unlock:
	__netif_tx_unlock_bh(txq);

	/* If pkt_dev->count is zero, then run forever */
	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
		pktgen_wait_for_skb(pkt_dev);

		/* Done with this */
		pktgen_stop_device(pkt_dev);
	}
}

/*
 * Main loop of the thread goes here
 */

static int pktgen_thread_worker(void *arg)
{
	DEFINE_WAIT(wait);
	struct pktgen_thread *t = arg;
	struct pktgen_dev *pkt_dev = NULL;
	int cpu = t->cpu;

	/* the thread is created per-CPU and must stay there */
	BUG_ON(smp_processor_id() != cpu);

	init_waitqueue_head(&t->queue);
	complete(&t->start_done);

	pr_debug("starting pktgen/%d:  pid=%d\n", cpu, task_pid_nr(current));

	set_current_state(TASK_INTERRUPTIBLE);

	set_freezable();

	while (!kthread_should_stop()) {
		pkt_dev = next_to_run(t);

		/* nothing to do and no pending control op: sleep briefly */
		if (unlikely(!pkt_dev && t->control == 0)) {
			if (t->net->pktgen_exiting)
				break;
			wait_event_interruptible_timeout(t->queue,
							 t->control != 0,
							 HZ/10);
			try_to_freeze();
			continue;
		}

		__set_current_state(TASK_RUNNING);

		if (likely(pkt_dev)) {
			pktgen_xmit(pkt_dev);

			if (need_resched())
				pktgen_resched(pkt_dev);
			else
				cpu_relax();
		}

		/* handle control ops posted by the controlling process */
		if (t->control & T_STOP) {
			pktgen_stop(t);
			t->control &= ~(T_STOP);
		}

		if (t->control & T_RUN) {
			pktgen_run(t);
			t->control &= ~(T_RUN);
		}

		if (t->control & T_REMDEVALL) {
			pktgen_rem_all_ifs(t);
			t->control &= ~(T_REMDEVALL);
		}

		if (t->control & T_REMDEV) {
			pktgen_rem_one_if(t);
			t->control &= ~(T_REMDEV);
		}

		try_to_freeze();

		set_current_state(TASK_INTERRUPTIBLE);
	}

	pr_debug("%s stopping all device\n", t->tsk->comm);
	pktgen_stop(t);

	pr_debug("%s removing all device\n", t->tsk->comm);
	pktgen_rem_all_ifs(t);

	pr_debug("%s removing thread\n", t->tsk->comm);
	pktgen_rem_thread(t);

	/* Wait for kthread_stop */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

/* Find a device on this thread by name.  A trailing '@' alias suffix
 * is tolerated unless 'exact' is set.
 */
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
					  const char *ifname, bool exact)
{
	struct pktgen_dev *p, *pkt_dev = NULL;
	size_t len = strlen(ifname);

	if_lock(t);
	list_for_each_entry(p, &t->if_list, list)
		if (strncmp(p->odevname, ifname, len) == 0) {
			if (p->odevname[len]) {
				/* prefix match only counts if next char
				 * is the '@' alias separator
				 */
				if (exact || p->odevname[len] != '@')
					continue;
			}
			pkt_dev = p;
			break;
		}

	if_unlock(t);
	pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
	return pkt_dev;
}

/*
 * Adds a dev at front of if_list.
 */

static int add_dev_to_thread(struct pktgen_thread *t,
			     struct pktgen_dev *pkt_dev)
{
	int rv = 0;

	if_lock(t);

	/* a device may only belong to one thread */
	if (pkt_dev->pg_thread) {
		pr_err("ERROR: already assigned to a thread\n");
		rv = -EBUSY;
		goto out;
	}

	list_add(&pkt_dev->list, &t->if_list);
	pkt_dev->pg_thread = t;
	pkt_dev->running = 0;

out:
	if_unlock(t);
	return rv;
}

/* Called under thread lock */

static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
{
	struct pktgen_dev *pkt_dev;
	int err;
	int node = cpu_to_node(t->cpu);

	/* We don't allow a device to be on several threads */

	pkt_dev = __pktgen_NN_threads(t->net, ifname, FIND);
	if (pkt_dev) {
		pr_err("ERROR: interface already used\n");
		return -EBUSY;
	}

	pkt_dev = kzalloc_node(sizeof(struct pktgen_dev), GFP_KERNEL, node);
	if (!pkt_dev)
		return -ENOMEM;

	strcpy(pkt_dev->odevname, ifname);
	pkt_dev->flows = vzalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
				      node);
	if (pkt_dev->flows == NULL) {
		kfree(pkt_dev);
		return -ENOMEM;
	}

	pkt_dev->removal_mark = 0;
	pkt_dev->nfrags = 0;
	pkt_dev->delay = pg_delay_d;
	pkt_dev->count = pg_count_d;
	pkt_dev->sofar = 0;
	pkt_dev->udp_src_min = 9;	/* sink port */
	pkt_dev->udp_src_max = 9;
	pkt_dev->udp_dst_min = 9;
	pkt_dev->udp_dst_max = 9;
	pkt_dev->vlan_p = 0;
	pkt_dev->vlan_cfi = 0;
	pkt_dev->vlan_id = 0xffff;
	pkt_dev->svlan_p = 0;
	pkt_dev->svlan_cfi = 0;
	pkt_dev->svlan_id = 0xffff;
	pkt_dev->node = -1;

	err = pktgen_setup_dev(t->net, pkt_dev, ifname);
	if (err)
		goto out1;
	if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
		pkt_dev->clone_skb = pg_clone_skb_d;

	pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
					  &pktgen_if_fops,
pkt_dev); 3577 if (!pkt_dev->entry) { 3578 pr_err("cannot create %s/%s procfs entry\n", 3579 PG_PROC_DIR, ifname); 3580 err = -EINVAL; 3581 goto out2; 3582 } 3583 #ifdef CONFIG_XFRM 3584 pkt_dev->ipsmode = XFRM_MODE_TRANSPORT; 3585 pkt_dev->ipsproto = IPPROTO_ESP; 3586 3587 /* xfrm tunnel mode needs additional dst to extract outter 3588 * ip header protocol/ttl/id field, here creat a phony one. 3589 * instead of looking for a valid rt, which definitely hurting 3590 * performance under such circumstance. 3591 */ 3592 pkt_dev->dstops.family = AF_INET; 3593 pkt_dev->dst.dev = pkt_dev->odev; 3594 dst_init_metrics(&pkt_dev->dst, pktgen_dst_metrics, false); 3595 pkt_dev->dst.child = &pkt_dev->dst; 3596 pkt_dev->dst.ops = &pkt_dev->dstops; 3597 #endif 3598 3599 return add_dev_to_thread(t, pkt_dev); 3600 out2: 3601 dev_put(pkt_dev->odev); 3602 out1: 3603 #ifdef CONFIG_XFRM 3604 free_SAs(pkt_dev); 3605 #endif 3606 vfree(pkt_dev->flows); 3607 kfree(pkt_dev); 3608 return err; 3609 } 3610 3611 static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn) 3612 { 3613 struct pktgen_thread *t; 3614 struct proc_dir_entry *pe; 3615 struct task_struct *p; 3616 3617 t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL, 3618 cpu_to_node(cpu)); 3619 if (!t) { 3620 pr_err("ERROR: out of memory, can't create new thread\n"); 3621 return -ENOMEM; 3622 } 3623 3624 spin_lock_init(&t->if_lock); 3625 t->cpu = cpu; 3626 3627 INIT_LIST_HEAD(&t->if_list); 3628 3629 list_add_tail(&t->th_list, &pn->pktgen_threads); 3630 init_completion(&t->start_done); 3631 3632 p = kthread_create_on_node(pktgen_thread_worker, 3633 t, 3634 cpu_to_node(cpu), 3635 "kpktgend_%d", cpu); 3636 if (IS_ERR(p)) { 3637 pr_err("kernel_thread() failed for cpu %d\n", t->cpu); 3638 list_del(&t->th_list); 3639 kfree(t); 3640 return PTR_ERR(p); 3641 } 3642 kthread_bind(p, cpu); 3643 t->tsk = p; 3644 3645 pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir, 3646 &pktgen_thread_fops, t); 3647 if (!pe) { 3648 
pr_err("cannot create %s/%s procfs entry\n", 3649 PG_PROC_DIR, t->tsk->comm); 3650 kthread_stop(p); 3651 list_del(&t->th_list); 3652 kfree(t); 3653 return -EINVAL; 3654 } 3655 3656 t->net = pn; 3657 wake_up_process(p); 3658 wait_for_completion(&t->start_done); 3659 3660 return 0; 3661 } 3662 3663 /* 3664 * Removes a device from the thread if_list. 3665 */ 3666 static void _rem_dev_from_if_list(struct pktgen_thread *t, 3667 struct pktgen_dev *pkt_dev) 3668 { 3669 struct list_head *q, *n; 3670 struct pktgen_dev *p; 3671 3672 list_for_each_safe(q, n, &t->if_list) { 3673 p = list_entry(q, struct pktgen_dev, list); 3674 if (p == pkt_dev) 3675 list_del(&p->list); 3676 } 3677 } 3678 3679 static int pktgen_remove_device(struct pktgen_thread *t, 3680 struct pktgen_dev *pkt_dev) 3681 { 3682 pr_debug("remove_device pkt_dev=%p\n", pkt_dev); 3683 3684 if (pkt_dev->running) { 3685 pr_warning("WARNING: trying to remove a running interface, stopping it now\n"); 3686 pktgen_stop_device(pkt_dev); 3687 } 3688 3689 /* Dis-associate from the interface */ 3690 3691 if (pkt_dev->odev) { 3692 dev_put(pkt_dev->odev); 3693 pkt_dev->odev = NULL; 3694 } 3695 3696 /* And update the thread if_list */ 3697 3698 _rem_dev_from_if_list(t, pkt_dev); 3699 3700 if (pkt_dev->entry) 3701 proc_remove(pkt_dev->entry); 3702 3703 #ifdef CONFIG_XFRM 3704 free_SAs(pkt_dev); 3705 #endif 3706 vfree(pkt_dev->flows); 3707 if (pkt_dev->page) 3708 put_page(pkt_dev->page); 3709 kfree(pkt_dev); 3710 return 0; 3711 } 3712 3713 static int __net_init pg_net_init(struct net *net) 3714 { 3715 struct pktgen_net *pn = net_generic(net, pg_net_id); 3716 struct proc_dir_entry *pe; 3717 int cpu, ret = 0; 3718 3719 pn->net = net; 3720 INIT_LIST_HEAD(&pn->pktgen_threads); 3721 pn->pktgen_exiting = false; 3722 pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net); 3723 if (!pn->proc_dir) { 3724 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR); 3725 return -ENODEV; 3726 } 3727 pe = proc_create(PGCTRL, 0600, pn->proc_dir, 
			 &pktgen_fops);
	if (pe == NULL) {
		pr_err("cannot create %s procfs entry\n", PGCTRL);
		ret = -EINVAL;
		goto remove;
	}

	/* One worker thread per online CPU; per-CPU failure is only a
	 * warning as long as at least one thread came up. */
	for_each_online_cpu(cpu) {
		int err;

		err = pktgen_create_thread(cpu, pn);
		if (err)
			pr_warn("Cannot create thread for cpu %d (%d)\n",
				cpu, err);
	}

	if (list_empty(&pn->pktgen_threads)) {
		pr_err("Initialization failed for all threads\n");
		ret = -ENODEV;
		goto remove_entry;
	}

	return 0;

remove_entry:
	remove_proc_entry(PGCTRL, pn->proc_dir);
remove:
	remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
	return ret;
}

/* Per-netns teardown: stop every worker thread, then tear down the
 * /proc/net/pktgen hierarchy. */
static void __net_exit pg_net_exit(struct net *net)
{
	struct pktgen_net *pn = net_generic(net, pg_net_id);
	struct pktgen_thread *t;
	struct list_head *q, *n;
	LIST_HEAD(list);

	/* Stop all interfaces & threads */
	pn->pktgen_exiting = true;

	/* Detach the thread list under the lock so we can walk it
	 * without holding pktgen_thread_lock across kthread_stop(). */
	mutex_lock(&pktgen_thread_lock);
	list_splice_init(&pn->pktgen_threads, &list);
	mutex_unlock(&pktgen_thread_lock);

	list_for_each_safe(q, n, &list) {
		t = list_entry(q, struct pktgen_thread, th_list);
		list_del(&t->th_list);
		kthread_stop(t->tsk);
		kfree(t);
	}

	remove_proc_entry(PGCTRL, pn->proc_dir);
	remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
}

static struct pernet_operations pg_net_ops = {
	.init = pg_net_init,
	.exit = pg_net_exit,
	.id   = &pg_net_id,
	.size = sizeof(struct pktgen_net),
};

/* Module init: register the pernet subsystem first, then the netdevice
 * notifier; unwind the former if the latter fails. */
static int __init pg_init(void)
{
	int ret = 0;

	pr_info("%s", version);
	ret = register_pernet_subsys(&pg_net_ops);
	if (ret)
		return ret;
	ret = register_netdevice_notifier(&pktgen_notifier_block);
	if (ret)
		unregister_pernet_subsys(&pg_net_ops);

	return ret;
}

/* Module exit: unregister in reverse order of pg_init(). */
static void __exit pg_cleanup(void)
{
	unregister_netdevice_notifier(&pktgen_notifier_block);
	unregister_pernet_subsys(&pg_net_ops);
}

module_init(pg_init);
module_exit(pg_cleanup);

MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>");
MODULE_DESCRIPTION("Packet Generator tool");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);
/* Module parameters: defaults applied to newly added devices. */
module_param(pg_count_d, int, 0);
MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject");
module_param(pg_delay_d, int, 0);
MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)");
module_param(pg_clone_skb_d, int, 0);
MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Enable debugging of pktgen module");