1 /* 2 * Xenbus code for netif backend 3 * 4 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au> 5 * Copyright (C) 2005 XenSource Ltd 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 21 #include "common.h" 22 #include <linux/vmalloc.h> 23 #include <linux/rtnetlink.h> 24 25 struct backend_info { 26 struct xenbus_device *dev; 27 struct xenvif *vif; 28 29 /* This is the state that will be reflected in xenstore when any 30 * active hotplug script completes. 31 */ 32 enum xenbus_state state; 33 34 enum xenbus_state frontend_state; 35 struct xenbus_watch hotplug_status_watch; 36 u8 have_hotplug_status_watch:1; 37 }; 38 39 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); 40 static void connect(struct backend_info *be); 41 static int read_xenbus_vif_flags(struct backend_info *be); 42 static int backend_create_xenvif(struct backend_info *be); 43 static void unregister_hotplug_status_watch(struct backend_info *be); 44 static void set_backend_state(struct backend_info *be, 45 enum xenbus_state state); 46 47 #ifdef CONFIG_DEBUG_FS 48 struct dentry *xen_netback_dbg_root = NULL; 49 50 static int xenvif_read_io_ring(struct seq_file *m, void *v) 51 { 52 struct xenvif_queue *queue = m->private; 53 struct xen_netif_tx_back_ring *tx_ring = &queue->tx; 54 struct xen_netif_rx_back_ring *rx_ring = &queue->rx; 55 struct netdev_queue *dev_queue; 56 57 if (tx_ring->sring) { 58 struct xen_netif_tx_sring *sring = tx_ring->sring; 59 60 seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id, 61 tx_ring->nr_ents); 62 seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n", 63 sring->req_prod, 64 sring->req_prod - sring->rsp_prod, 65 tx_ring->req_cons, 66 tx_ring->req_cons - sring->rsp_prod, 67 sring->req_event, 68 sring->req_event - sring->rsp_prod); 69 seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n", 70 sring->rsp_prod, 71 tx_ring->rsp_prod_pvt, 72 tx_ring->rsp_prod_pvt - sring->rsp_prod, 73 sring->rsp_event, 74 sring->rsp_event - sring->rsp_prod); 75 seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n", 76 queue->pending_prod, 77 queue->pending_cons, 78 nr_pending_reqs(queue)); 79 seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n", 80 queue->dealloc_prod, 81 queue->dealloc_cons, 82 queue->dealloc_prod - queue->dealloc_cons); 83 } 84 85 if (rx_ring->sring) { 86 struct xen_netif_rx_sring *sring = rx_ring->sring; 87 88 seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents); 89 seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n", 90 sring->req_prod, 91 sring->req_prod - sring->rsp_prod, 92 rx_ring->req_cons, 93 rx_ring->req_cons - sring->rsp_prod, 94 sring->req_event, 95 sring->req_event - sring->rsp_prod); 96 seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n", 97 sring->rsp_prod, 98 rx_ring->rsp_prod_pvt, 99 rx_ring->rsp_prod_pvt - sring->rsp_prod, 100 sring->rsp_event, 101 sring->rsp_event - sring->rsp_prod); 102 } 103 104 seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n" 105 "Credit timer_pending: %d, credit: %lu, usec: %lu\n" 106 "remaining: %lu, expires: %lu, now: %lu\n", 107 queue->napi.state, queue->napi.weight, 108 skb_queue_len(&queue->tx_queue), 109 timer_pending(&queue->credit_timeout), 110 queue->credit_bytes, 111 queue->credit_usec, 112 queue->remaining_credit, 113 queue->credit_timeout.expires, 114 jiffies); 115 116 dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id); 117 118 seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n", 119 queue->rx_queue_len, queue->rx_queue_max, 120 skb_queue_len(&queue->rx_queue), 121 netif_tx_queue_stopped(dev_queue) ? "stopped" : "running"); 122 123 return 0; 124 } 125 126 #define XENVIF_KICK_STR "kick" 127 #define BUFFER_SIZE 32 128 129 static ssize_t 130 xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count, 131 loff_t *ppos) 132 { 133 struct xenvif_queue *queue = 134 ((struct seq_file *)filp->private_data)->private; 135 int len; 136 char write[BUFFER_SIZE]; 137 138 /* don't allow partial writes and check the length */ 139 if (*ppos != 0) 140 return 0; 141 if (count >= sizeof(write)) 142 return -ENOSPC; 143 144 len = simple_write_to_buffer(write, 145 sizeof(write) - 1, 146 ppos, 147 buf, 148 count); 149 if (len < 0) 150 return len; 151 152 write[len] = '\0'; 153 154 if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1)) 155 xenvif_interrupt(0, (void *)queue); 156 else { 157 pr_warn("Unknown command to io_ring_q%d. Available: kick\n", 158 queue->id); 159 count = -EINVAL; 160 } 161 return count; 162 } 163 164 static int xenvif_dump_open(struct inode *inode, struct file *filp) 165 { 166 int ret; 167 void *queue = NULL; 168 169 if (inode->i_private) 170 queue = inode->i_private; 171 ret = single_open(filp, xenvif_read_io_ring, queue); 172 filp->f_mode |= FMODE_PWRITE; 173 return ret; 174 } 175 176 static const struct file_operations xenvif_dbg_io_ring_ops_fops = { 177 .owner = THIS_MODULE, 178 .open = xenvif_dump_open, 179 .read = seq_read, 180 .llseek = seq_lseek, 181 .release = single_release, 182 .write = xenvif_write_io_ring, 183 }; 184 185 static void xenvif_debugfs_addif(struct xenvif *vif) 186 { 187 struct dentry *pfile; 188 int i; 189 190 if (IS_ERR_OR_NULL(xen_netback_dbg_root)) 191 return; 192 193 vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name, 194 xen_netback_dbg_root); 195 if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) { 196 for (i = 0; i < vif->num_queues; ++i) { 197 char filename[sizeof("io_ring_q") + 4]; 198 199 snprintf(filename, sizeof(filename), "io_ring_q%d", i); 200 pfile = debugfs_create_file(filename, 201 S_IRUSR | S_IWUSR, 202 vif->xenvif_dbg_root, 203 &vif->queues[i], 204 &xenvif_dbg_io_ring_ops_fops); 205 if (IS_ERR_OR_NULL(pfile)) 206 pr_warn("Creation of io_ring file returned %ld!\n", 207 PTR_ERR(pfile)); 208 } 209 } else 210 netdev_warn(vif->dev, 211 "Creation of vif debugfs dir returned %ld!\n", 212 PTR_ERR(vif->xenvif_dbg_root)); 213 } 214 215 static void xenvif_debugfs_delif(struct xenvif *vif) 216 { 217 if (IS_ERR_OR_NULL(xen_netback_dbg_root)) 218 return; 219 220 if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) 221 debugfs_remove_recursive(vif->xenvif_dbg_root); 222 vif->xenvif_dbg_root = NULL; 223 } 224 #endif /* CONFIG_DEBUG_FS */ 225 226 static int netback_remove(struct xenbus_device *dev) 227 { 228 struct backend_info *be = dev_get_drvdata(&dev->dev); 229 230 set_backend_state(be, XenbusStateClosed); 231 232 unregister_hotplug_status_watch(be); 233 if (be->vif) { 234 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 235 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 236 xenvif_free(be->vif); 237 be->vif = NULL; 238 } 239 kfree(be); 240 dev_set_drvdata(&dev->dev, NULL); 241 return 0; 242 } 243 244 245 /** 246 * Entry point to this code when a new device is created. Allocate the basic 247 * structures and switch to InitWait. 248 */ 249 static int netback_probe(struct xenbus_device *dev, 250 const struct xenbus_device_id *id) 251 { 252 const char *message; 253 struct xenbus_transaction xbt; 254 int err; 255 int sg; 256 struct backend_info *be = kzalloc(sizeof(struct backend_info), 257 GFP_KERNEL); 258 if (!be) { 259 xenbus_dev_fatal(dev, -ENOMEM, 260 "allocating backend structure"); 261 return -ENOMEM; 262 } 263 264 be->dev = dev; 265 dev_set_drvdata(&dev->dev, be); 266 267 sg = 1; 268 269 do { 270 err = xenbus_transaction_start(&xbt); 271 if (err) { 272 xenbus_dev_fatal(dev, err, "starting transaction"); 273 goto fail; 274 } 275 276 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg); 277 if (err) { 278 message = "writing feature-sg"; 279 goto abort_transaction; 280 } 281 282 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", 283 "%d", sg); 284 if (err) { 285 message = "writing feature-gso-tcpv4"; 286 goto abort_transaction; 287 } 288 289 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6", 290 "%d", sg); 291 if (err) { 292 message = "writing feature-gso-tcpv6"; 293 goto abort_transaction; 294 } 295 296 /* We support partial checksum setup for IPv6 packets */ 297 err = xenbus_printf(xbt, dev->nodename, 298 "feature-ipv6-csum-offload", 299 "%d", 1); 300 if (err) { 301 message = "writing feature-ipv6-csum-offload"; 302 goto abort_transaction; 303 } 304 305 /* We support rx-copy path. */ 306 err = xenbus_printf(xbt, dev->nodename, 307 "feature-rx-copy", "%d", 1); 308 if (err) { 309 message = "writing feature-rx-copy"; 310 goto abort_transaction; 311 } 312 313 /* 314 * We don't support rx-flip path (except old guests who don't 315 * grok this feature flag). 316 */ 317 err = xenbus_printf(xbt, dev->nodename, 318 "feature-rx-flip", "%d", 0); 319 if (err) { 320 message = "writing feature-rx-flip"; 321 goto abort_transaction; 322 } 323 324 err = xenbus_transaction_end(xbt, 0); 325 } while (err == -EAGAIN); 326 327 if (err) { 328 xenbus_dev_fatal(dev, err, "completing transaction"); 329 goto fail; 330 } 331 332 /* 333 * Split event channels support, this is optional so it is not 334 * put inside the above loop. 335 */ 336 err = xenbus_printf(XBT_NIL, dev->nodename, 337 "feature-split-event-channels", 338 "%u", separate_tx_rx_irq); 339 if (err) 340 pr_debug("Error writing feature-split-event-channels\n"); 341 342 /* Multi-queue support: This is an optional feature. */ 343 err = xenbus_printf(XBT_NIL, dev->nodename, 344 "multi-queue-max-queues", "%u", xenvif_max_queues); 345 if (err) 346 pr_debug("Error writing multi-queue-max-queues\n"); 347 348 err = xenbus_switch_state(dev, XenbusStateInitWait); 349 if (err) 350 goto fail; 351 352 be->state = XenbusStateInitWait; 353 354 /* This kicks hotplug scripts, so do it immediately. */ 355 err = backend_create_xenvif(be); 356 if (err) 357 goto fail; 358 359 return 0; 360 361 abort_transaction: 362 xenbus_transaction_end(xbt, 1); 363 xenbus_dev_fatal(dev, err, "%s", message); 364 fail: 365 pr_debug("failed\n"); 366 netback_remove(dev); 367 return err; 368 } 369 370 371 /* 372 * Handle the creation of the hotplug script environment. We add the script 373 * and vif variables to the environment, for the benefit of the vif-* hotplug 374 * scripts. 375 */ 376 static int netback_uevent(struct xenbus_device *xdev, 377 struct kobj_uevent_env *env) 378 { 379 struct backend_info *be = dev_get_drvdata(&xdev->dev); 380 char *val; 381 382 val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); 383 if (IS_ERR(val)) { 384 int err = PTR_ERR(val); 385 xenbus_dev_fatal(xdev, err, "reading script"); 386 return err; 387 } else { 388 if (add_uevent_var(env, "script=%s", val)) { 389 kfree(val); 390 return -ENOMEM; 391 } 392 kfree(val); 393 } 394 395 if (!be || !be->vif) 396 return 0; 397 398 return add_uevent_var(env, "vif=%s", be->vif->dev->name); 399 } 400 401 402 static int backend_create_xenvif(struct backend_info *be) 403 { 404 int err; 405 long handle; 406 struct xenbus_device *dev = be->dev; 407 struct xenvif *vif; 408 409 if (be->vif != NULL) 410 return 0; 411 412 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle); 413 if (err != 1) { 414 xenbus_dev_fatal(dev, err, "reading handle"); 415 return (err < 0) ? err : -EINVAL; 416 } 417 418 vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); 419 if (IS_ERR(vif)) { 420 err = PTR_ERR(vif); 421 xenbus_dev_fatal(dev, err, "creating interface"); 422 return err; 423 } 424 be->vif = vif; 425 426 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); 427 return 0; 428 } 429 430 static void backend_disconnect(struct backend_info *be) 431 { 432 if (be->vif) { 433 #ifdef CONFIG_DEBUG_FS 434 xenvif_debugfs_delif(be->vif); 435 #endif /* CONFIG_DEBUG_FS */ 436 xenvif_disconnect(be->vif); 437 } 438 } 439 440 static void backend_connect(struct backend_info *be) 441 { 442 if (be->vif) 443 connect(be); 444 } 445 446 static inline void backend_switch_state(struct backend_info *be, 447 enum xenbus_state state) 448 { 449 struct xenbus_device *dev = be->dev; 450 451 pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state)); 452 be->state = state; 453 454 /* If we are waiting for a hotplug script then defer the 455 * actual xenbus state change. 456 */ 457 if (!be->have_hotplug_status_watch) 458 xenbus_switch_state(dev, state); 459 } 460 461 /* Handle backend state transitions: 462 * 463 * The backend state starts in InitWait and the following transitions are 464 * allowed. 465 * 466 * InitWait -> Connected 467 * 468 * ^ \ | 469 * | \ | 470 * | \ | 471 * | \ | 472 * | \ | 473 * | \ | 474 * | V V 475 * 476 * Closed <-> Closing 477 * 478 * The state argument specifies the eventual state of the backend and the 479 * function transitions to that state via the shortest path. 480 */ 481 static void set_backend_state(struct backend_info *be, 482 enum xenbus_state state) 483 { 484 while (be->state != state) { 485 switch (be->state) { 486 case XenbusStateClosed: 487 switch (state) { 488 case XenbusStateInitWait: 489 case XenbusStateConnected: 490 pr_info("%s: prepare for reconnect\n", 491 be->dev->nodename); 492 backend_switch_state(be, XenbusStateInitWait); 493 break; 494 case XenbusStateClosing: 495 backend_switch_state(be, XenbusStateClosing); 496 break; 497 default: 498 BUG(); 499 } 500 break; 501 case XenbusStateInitWait: 502 switch (state) { 503 case XenbusStateConnected: 504 backend_connect(be); 505 backend_switch_state(be, XenbusStateConnected); 506 break; 507 case XenbusStateClosing: 508 case XenbusStateClosed: 509 backend_switch_state(be, XenbusStateClosing); 510 break; 511 default: 512 BUG(); 513 } 514 break; 515 case XenbusStateConnected: 516 switch (state) { 517 case XenbusStateInitWait: 518 case XenbusStateClosing: 519 case XenbusStateClosed: 520 backend_disconnect(be); 521 backend_switch_state(be, XenbusStateClosing); 522 break; 523 default: 524 BUG(); 525 } 526 break; 527 case XenbusStateClosing: 528 switch (state) { 529 case XenbusStateInitWait: 530 case XenbusStateConnected: 531 case XenbusStateClosed: 532 backend_switch_state(be, XenbusStateClosed); 533 break; 534 default: 535 BUG(); 536 } 537 break; 538 default: 539 BUG(); 540 } 541 } 542 } 543 544 /** 545 * Callback received when the frontend's state changes. 546 */ 547 static void frontend_changed(struct xenbus_device *dev, 548 enum xenbus_state frontend_state) 549 { 550 struct backend_info *be = dev_get_drvdata(&dev->dev); 551 552 pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state)); 553 554 be->frontend_state = frontend_state; 555 556 switch (frontend_state) { 557 case XenbusStateInitialising: 558 set_backend_state(be, XenbusStateInitWait); 559 break; 560 561 case XenbusStateInitialised: 562 break; 563 564 case XenbusStateConnected: 565 set_backend_state(be, XenbusStateConnected); 566 break; 567 568 case XenbusStateClosing: 569 set_backend_state(be, XenbusStateClosing); 570 break; 571 572 case XenbusStateClosed: 573 set_backend_state(be, XenbusStateClosed); 574 if (xenbus_dev_is_online(dev)) 575 break; 576 /* fall through if not online */ 577 case XenbusStateUnknown: 578 set_backend_state(be, XenbusStateClosed); 579 device_unregister(&dev->dev); 580 break; 581 582 default: 583 xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend", 584 frontend_state); 585 break; 586 } 587 } 588 589 590 static void xen_net_read_rate(struct xenbus_device *dev, 591 unsigned long *bytes, unsigned long *usec) 592 { 593 char *s, *e; 594 unsigned long b, u; 595 char *ratestr; 596 597 /* Default to unlimited bandwidth. */ 598 *bytes = ~0UL; 599 *usec = 0; 600 601 ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL); 602 if (IS_ERR(ratestr)) 603 return; 604 605 s = ratestr; 606 b = simple_strtoul(s, &e, 10); 607 if ((s == e) || (*e != ',')) 608 goto fail; 609 610 s = e + 1; 611 u = simple_strtoul(s, &e, 10); 612 if ((s == e) || (*e != '\0')) 613 goto fail; 614 615 *bytes = b; 616 *usec = u; 617 618 kfree(ratestr); 619 return; 620 621 fail: 622 pr_warn("Failed to parse network rate limit. Traffic unlimited.\n"); 623 kfree(ratestr); 624 } 625 626 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) 627 { 628 char *s, *e, *macstr; 629 int i; 630 631 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); 632 if (IS_ERR(macstr)) 633 return PTR_ERR(macstr); 634 635 for (i = 0; i < ETH_ALEN; i++) { 636 mac[i] = simple_strtoul(s, &e, 16); 637 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { 638 kfree(macstr); 639 return -ENOENT; 640 } 641 s = e+1; 642 } 643 644 kfree(macstr); 645 return 0; 646 } 647 648 static void unregister_hotplug_status_watch(struct backend_info *be) 649 { 650 if (be->have_hotplug_status_watch) { 651 unregister_xenbus_watch(&be->hotplug_status_watch); 652 kfree(be->hotplug_status_watch.node); 653 } 654 be->have_hotplug_status_watch = 0; 655 } 656 657 static void hotplug_status_changed(struct xenbus_watch *watch, 658 const char **vec, 659 unsigned int vec_size) 660 { 661 struct backend_info *be = container_of(watch, 662 struct backend_info, 663 hotplug_status_watch); 664 char *str; 665 unsigned int len; 666 667 str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len); 668 if (IS_ERR(str)) 669 return; 670 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { 671 /* Complete any pending state change */ 672 xenbus_switch_state(be->dev, be->state); 673 674 /* Not interested in this watch anymore. */ 675 unregister_hotplug_status_watch(be); 676 } 677 kfree(str); 678 } 679 680 static void connect(struct backend_info *be) 681 { 682 int err; 683 struct xenbus_device *dev = be->dev; 684 unsigned long credit_bytes, credit_usec; 685 unsigned int queue_index; 686 unsigned int requested_num_queues; 687 struct xenvif_queue *queue; 688 689 /* Check whether the frontend requested multiple queues 690 * and read the number requested. 691 */ 692 err = xenbus_scanf(XBT_NIL, dev->otherend, 693 "multi-queue-num-queues", 694 "%u", &requested_num_queues); 695 if (err < 0) { 696 requested_num_queues = 1; /* Fall back to single queue */ 697 } else if (requested_num_queues > xenvif_max_queues) { 698 /* buggy or malicious guest */ 699 xenbus_dev_fatal(dev, err, 700 "guest requested %u queues, exceeding the maximum of %u.", 701 requested_num_queues, xenvif_max_queues); 702 return; 703 } 704 705 err = xen_net_read_mac(dev, be->vif->fe_dev_addr); 706 if (err) { 707 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 708 return; 709 } 710 711 xen_net_read_rate(dev, &credit_bytes, &credit_usec); 712 read_xenbus_vif_flags(be); 713 714 /* Use the number of queues requested by the frontend */ 715 be->vif->queues = vzalloc(requested_num_queues * 716 sizeof(struct xenvif_queue)); 717 be->vif->num_queues = requested_num_queues; 718 be->vif->stalled_queues = requested_num_queues; 719 720 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) { 721 queue = &be->vif->queues[queue_index]; 722 queue->vif = be->vif; 723 queue->id = queue_index; 724 snprintf(queue->name, sizeof(queue->name), "%s-q%u", 725 be->vif->dev->name, queue->id); 726 727 err = xenvif_init_queue(queue); 728 if (err) { 729 /* xenvif_init_queue() cleans up after itself on 730 * failure, but we need to clean up any previously 731 * initialised queues. Set num_queues to i so that 732 * earlier queues can be destroyed using the regular 733 * disconnect logic. 734 */ 735 be->vif->num_queues = queue_index; 736 goto err; 737 } 738 739 queue->remaining_credit = credit_bytes; 740 queue->credit_usec = credit_usec; 741 742 err = connect_rings(be, queue); 743 if (err) { 744 /* connect_rings() cleans up after itself on failure, 745 * but we need to clean up after xenvif_init_queue() here, 746 * and also clean up any previously initialised queues. 747 */ 748 xenvif_deinit_queue(queue); 749 be->vif->num_queues = queue_index; 750 goto err; 751 } 752 } 753 754 #ifdef CONFIG_DEBUG_FS 755 xenvif_debugfs_addif(be->vif); 756 #endif /* CONFIG_DEBUG_FS */ 757 758 /* Initialisation completed, tell core driver the number of 759 * active queues. 760 */ 761 rtnl_lock(); 762 netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues); 763 netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues); 764 rtnl_unlock(); 765 766 xenvif_carrier_on(be->vif); 767 768 unregister_hotplug_status_watch(be); 769 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 770 hotplug_status_changed, 771 "%s/%s", dev->nodename, "hotplug-status"); 772 if (!err) 773 be->have_hotplug_status_watch = 1; 774 775 netif_tx_wake_all_queues(be->vif->dev); 776 777 return; 778 779 err: 780 if (be->vif->num_queues > 0) 781 xenvif_disconnect(be->vif); /* Clean up existing queues */ 782 vfree(be->vif->queues); 783 be->vif->queues = NULL; 784 be->vif->num_queues = 0; 785 return; 786 } 787 788 789 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue) 790 { 791 struct xenbus_device *dev = be->dev; 792 unsigned int num_queues = queue->vif->num_queues; 793 unsigned long tx_ring_ref, rx_ring_ref; 794 unsigned int tx_evtchn, rx_evtchn; 795 int err; 796 char *xspath; 797 size_t xspathsize; 798 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */ 799 800 /* If the frontend requested 1 queue, or we have fallen back 801 * to single queue due to lack of frontend support for multi- 802 * queue, expect the remaining XenStore keys in the toplevel 803 * directory. Otherwise, expect them in a subdirectory called 804 * queue-N. 805 */ 806 if (num_queues == 1) { 807 xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL); 808 if (!xspath) { 809 xenbus_dev_fatal(dev, -ENOMEM, 810 "reading ring references"); 811 return -ENOMEM; 812 } 813 strcpy(xspath, dev->otherend); 814 } else { 815 xspathsize = strlen(dev->otherend) + xenstore_path_ext_size; 816 xspath = kzalloc(xspathsize, GFP_KERNEL); 817 if (!xspath) { 818 xenbus_dev_fatal(dev, -ENOMEM, 819 "reading ring references"); 820 return -ENOMEM; 821 } 822 snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, 823 queue->id); 824 } 825 826 err = xenbus_gather(XBT_NIL, xspath, 827 "tx-ring-ref", "%lu", &tx_ring_ref, 828 "rx-ring-ref", "%lu", &rx_ring_ref, NULL); 829 if (err) { 830 xenbus_dev_fatal(dev, err, 831 "reading %s/ring-ref", 832 xspath); 833 goto err; 834 } 835 836 /* Try split event channels first, then single event channel. */ 837 err = xenbus_gather(XBT_NIL, xspath, 838 "event-channel-tx", "%u", &tx_evtchn, 839 "event-channel-rx", "%u", &rx_evtchn, NULL); 840 if (err < 0) { 841 err = xenbus_scanf(XBT_NIL, xspath, 842 "event-channel", "%u", &tx_evtchn); 843 if (err < 0) { 844 xenbus_dev_fatal(dev, err, 845 "reading %s/event-channel(-tx/rx)", 846 xspath); 847 goto err; 848 } 849 rx_evtchn = tx_evtchn; 850 } 851 852 /* Map the shared frame, irq etc. */ 853 err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref, 854 tx_evtchn, rx_evtchn); 855 if (err) { 856 xenbus_dev_fatal(dev, err, 857 "mapping shared-frames %lu/%lu port tx %u rx %u", 858 tx_ring_ref, rx_ring_ref, 859 tx_evtchn, rx_evtchn); 860 goto err; 861 } 862 863 err = 0; 864 err: /* Regular return falls through with err == 0 */ 865 kfree(xspath); 866 return err; 867 } 868 869 static int read_xenbus_vif_flags(struct backend_info *be) 870 { 871 struct xenvif *vif = be->vif; 872 struct xenbus_device *dev = be->dev; 873 unsigned int rx_copy; 874 int err, val; 875 876 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", 877 &rx_copy); 878 if (err == -ENOENT) { 879 err = 0; 880 rx_copy = 0; 881 } 882 if (err < 0) { 883 xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy", 884 dev->otherend); 885 return err; 886 } 887 if (!rx_copy) 888 return -EOPNOTSUPP; 889 890 if (xenbus_scanf(XBT_NIL, dev->otherend, 891 "feature-rx-notify", "%d", &val) < 0) 892 val = 0; 893 if (!val) { 894 /* - Reduce drain timeout to poll more frequently for 895 * Rx requests. 896 * - Disable Rx stall detection. 897 */ 898 be->vif->drain_timeout = msecs_to_jiffies(30); 899 be->vif->stall_timeout = 0; 900 } 901 902 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", 903 "%d", &val) < 0) 904 val = 0; 905 vif->can_sg = !!val; 906 907 vif->gso_mask = 0; 908 vif->gso_prefix_mask = 0; 909 910 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", 911 "%d", &val) < 0) 912 val = 0; 913 if (val) 914 vif->gso_mask |= GSO_BIT(TCPV4); 915 916 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix", 917 "%d", &val) < 0) 918 val = 0; 919 if (val) 920 vif->gso_prefix_mask |= GSO_BIT(TCPV4); 921 922 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6", 923 "%d", &val) < 0) 924 val = 0; 925 if (val) 926 vif->gso_mask |= GSO_BIT(TCPV6); 927 928 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix", 929 "%d", &val) < 0) 930 val = 0; 931 if (val) 932 vif->gso_prefix_mask |= GSO_BIT(TCPV6); 933 934 if (vif->gso_mask & vif->gso_prefix_mask) { 935 xenbus_dev_fatal(dev, err, 936 "%s: gso and gso prefix flags are not " 937 "mutually exclusive", 938 dev->otherend); 939 return -EOPNOTSUPP; 940 } 941 942 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", 943 "%d", &val) < 0) 944 val = 0; 945 vif->ip_csum = !val; 946 947 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload", 948 "%d", &val) < 0) 949 val = 0; 950 vif->ipv6_csum = !!val; 951 952 return 0; 953 } 954 955 static const struct xenbus_device_id netback_ids[] = { 956 { "vif" }, 957 { "" } 958 }; 959 960 static struct xenbus_driver netback_driver = { 961 .ids = netback_ids, 962 .probe = netback_probe, 963 .remove = netback_remove, 964 .uevent = netback_uevent, 965 .otherend_changed = frontend_changed, 966 }; 967 968 int xenvif_xenbus_init(void) 969 { 970 return xenbus_register_backend(&netback_driver); 971 } 972 973 void xenvif_xenbus_fini(void) 974 { 975 return xenbus_unregister_driver(&netback_driver); 976 } 977