/*
 * Xenbus code for netif backend
 *
 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>

struct backend_info {
	struct xenbus_device *dev;
	struct xenvif *vif;

	/* This is the state that will be reflected in xenstore when any
	 * active hotplug script completes.
	 */
	enum xenbus_state state;

	enum xenbus_state frontend_state;
	struct xenbus_watch hotplug_status_watch;
	u8 have_hotplug_status_watch:1;
};

static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static void backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void set_backend_state(struct backend_info *be,
			      enum xenbus_state state);

#ifdef CONFIG_DEBUG_FS
struct dentry *xen_netback_dbg_root = NULL;

static int xenvif_read_io_ring(struct seq_file *m, void *v)
{
	struct xenvif_queue *queue = m->private;
	struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
	struct xen_netif_rx_back_ring *rx_ring = &queue->rx;

	if (tx_ring->sring) {
		struct xen_netif_tx_sring *sring = tx_ring->sring;

		seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
			   tx_ring->nr_ents);
		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
			   sring->req_prod,
			   sring->req_prod - sring->rsp_prod,
			   tx_ring->req_cons,
			   tx_ring->req_cons - sring->rsp_prod,
			   sring->req_event,
			   sring->req_event - sring->rsp_prod);
		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
			   sring->rsp_prod,
			   tx_ring->rsp_prod_pvt,
			   tx_ring->rsp_prod_pvt - sring->rsp_prod,
			   sring->rsp_event,
			   sring->rsp_event - sring->rsp_prod);
		seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
			   queue->pending_prod,
			   queue->pending_cons,
			   nr_pending_reqs(queue));
		seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
			   queue->dealloc_prod,
			   queue->dealloc_cons,
			   queue->dealloc_prod - queue->dealloc_cons);
	}

	if (rx_ring->sring) {
		struct xen_netif_rx_sring *sring = rx_ring->sring;

		seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
			   sring->req_prod,
			   sring->req_prod - sring->rsp_prod,
			   rx_ring->req_cons,
			   rx_ring->req_cons - sring->rsp_prod,
			   sring->req_event,
			   sring->req_event - sring->rsp_prod);
		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
			   sring->rsp_prod,
			   rx_ring->rsp_prod_pvt,
			   rx_ring->rsp_prod_pvt - sring->rsp_prod,
			   sring->rsp_event,
			   sring->rsp_event - sring->rsp_prod);
	}

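	/* Ring pointers aside, dump the queue's NAPI state and the
	 * credit-based rate limiter; handy when diagnosing a stalled queue.
	 */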
	seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
		   "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
		   "remaining: %lu, expires: %lu, now: %lu\n",
		   queue->napi.state, queue->napi.weight,
		   skb_queue_len(&queue->tx_queue),
		   timer_pending(&queue->credit_timeout),
		   queue->credit_bytes,
		   queue->credit_usec,
		   queue->remaining_credit,
		   queue->credit_timeout.expires,
		   jiffies);

	return 0;
}

#define XENVIF_KICK_STR "kick"

static ssize_t
xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
		     loff_t *ppos)
{
	struct xenvif_queue *queue =
		((struct seq_file *)filp->private_data)->private;
	int len;
	char write[sizeof(XENVIF_KICK_STR)];

	/* don't allow partial writes and check the length */
	if (*ppos != 0)
		return 0;
	if (count < sizeof(XENVIF_KICK_STR) - 1)
		return -ENOSPC;

	len = simple_write_to_buffer(write,
				     sizeof(write),
				     ppos,
				     buf,
				     count);
	if (len < 0)
		return len;

	if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
		xenvif_interrupt(0, (void *)queue);
	else {
		pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
			queue->id);
		count = -EINVAL;
	}
	return count;
}

static int xenvif_dump_open(struct inode *inode, struct file *filp)
{
	int ret;
	void *queue = NULL;

	if (inode->i_private)
		queue = inode->i_private;
	ret = single_open(filp, xenvif_read_io_ring, queue);
	filp->f_mode |= FMODE_PWRITE;
	return ret;
}

static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
	.owner = THIS_MODULE,
	.open = xenvif_dump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = xenvif_write_io_ring,
};

static void xenvif_debugfs_addif(struct xenvif_queue *queue)
{
	struct dentry *pfile;
	struct xenvif *vif = queue->vif;
	int i;

	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		return;

	vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
						  xen_netback_dbg_root);
	if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) {
		for (i = 0; i < vif->num_queues; ++i) {
			char filename[sizeof("io_ring_q") + 4];

			snprintf(filename, sizeof(filename), "io_ring_q%d", i);
			pfile = debugfs_create_file(filename,
						    S_IRUSR | S_IWUSR,
						    vif->xenvif_dbg_root,
						    &vif->queues[i],
						    &xenvif_dbg_io_ring_ops_fops);
			if (IS_ERR_OR_NULL(pfile))
				pr_warn("Creation of io_ring file returned %ld!\n",
					PTR_ERR(pfile));
		}
	} else {
		netdev_warn(vif->dev,
			    "Creation of vif debugfs dir returned %ld!\n",
			    PTR_ERR(vif->xenvif_dbg_root));
	}
}

static void xenvif_debugfs_delif(struct xenvif *vif)
{
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		return;

	if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root))
		debugfs_remove_recursive(vif->xenvif_dbg_root);
	vif->xenvif_dbg_root = NULL;
}
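
/* For illustration: each vif gets a directory under the driver's debugfs
 * root.  Assuming debugfs is mounted at /sys/kernel/debug and a vif named
 * "vif1.0" (hypothetical), queue 0 can be inspected and kicked by hand:
 *
 *   cat /sys/kernel/debug/xen-netback/vif1.0/io_ring_q0
 *   echo kick > /sys/kernel/debug/xen-netback/vif1.0/io_ring_q0
 *
 * Writing "kick" simulates an interrupt on that queue via xenvif_interrupt().
 */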
#endif /* CONFIG_DEBUG_FS */

static int netback_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	set_backend_state(be, XenbusStateClosed);

	unregister_hotplug_status_watch(be);
	if (be->vif) {
		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
		xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
		xenvif_free(be->vif);
		be->vif = NULL;
	}
	kfree(be);
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}


/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and switch to InitWait.
 */
static int netback_probe(struct xenbus_device *dev,
			 const struct xenbus_device_id *id)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	int sg;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);
	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}

	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	sg = 1;

	do {
		err = xenbus_transaction_start(&xbt);
		if (err) {
			xenbus_dev_fatal(dev, err, "starting transaction");
			goto fail;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
		if (err) {
			message = "writing feature-sg";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
				    "%d", sg);
		if (err) {
			message = "writing feature-gso-tcpv4";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
				    "%d", sg);
		if (err) {
			message = "writing feature-gso-tcpv6";
			goto abort_transaction;
		}

		/* We support partial checksum setup for IPv6 packets */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-ipv6-csum-offload",
				    "%d", 1);
		if (err) {
			message = "writing feature-ipv6-csum-offload";
			goto abort_transaction;
		}

		/* We support rx-copy path. */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-rx-copy", "%d", 1);
		if (err) {
			message = "writing feature-rx-copy";
			goto abort_transaction;
		}

		/*
		 * We don't support rx-flip path (except old guests who don't
		 * grok this feature flag).
		 */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-rx-flip", "%d", 0);
		if (err) {
			message = "writing feature-rx-flip";
			goto abort_transaction;
		}

		err = xenbus_transaction_end(xbt, 0);
	} while (err == -EAGAIN);

	if (err) {
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto fail;
	}

	/*
	 * Split event channels support, this is optional so it is not
	 * put inside the above loop.
	 */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-split-event-channels",
			    "%u", separate_tx_rx_irq);
	if (err)
		pr_debug("Error writing feature-split-event-channels\n");

	/* Multi-queue support: This is an optional feature. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenvif_max_queues);
	if (err)
		pr_debug("Error writing multi-queue-max-queues\n");

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	be->state = XenbusStateInitWait;

	/* This kicks hotplug scripts, so do it immediately. */
	backend_create_xenvif(be);

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
fail:
	pr_debug("failed\n");
	netback_remove(dev);
	return err;
}
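
/* For illustration only: after a successful probe for frontend domain N with
 * device handle H (hypothetical values), the backend's xenstore directory
 * contains roughly:
 *
 *   backend/vif/N/H/feature-sg                   = "1"
 *   backend/vif/N/H/feature-gso-tcpv4            = "1"
 *   backend/vif/N/H/feature-gso-tcpv6            = "1"
 *   backend/vif/N/H/feature-ipv6-csum-offload    = "1"
 *   backend/vif/N/H/feature-rx-copy              = "1"
 *   backend/vif/N/H/feature-rx-flip              = "0"
 *   backend/vif/N/H/feature-split-event-channels = "0" or "1"
 *   backend/vif/N/H/multi-queue-max-queues       = <xenvif_max_queues>
 *
 * which the frontend reads to discover what this backend supports.
 */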

/*
 * Handle the creation of the hotplug script environment.  We add the script
 * and vif variables to the environment, for the benefit of the vif-* hotplug
 * scripts, e.g. (hypothetical values):
 *
 *   script=/etc/xen/scripts/vif-bridge
 *   vif=vif1.0
 */
static int netback_uevent(struct xenbus_device *xdev,
			  struct kobj_uevent_env *env)
{
	struct backend_info *be = dev_get_drvdata(&xdev->dev);
	char *val;

	val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
	if (IS_ERR(val)) {
		int err = PTR_ERR(val);

		xenbus_dev_fatal(xdev, err, "reading script");
		return err;
	} else {
		if (add_uevent_var(env, "script=%s", val)) {
			kfree(val);
			return -ENOMEM;
		}
		kfree(val);
	}

	if (!be || !be->vif)
		return 0;

	return add_uevent_var(env, "vif=%s", be->vif->dev->name);
}


static void backend_create_xenvif(struct backend_info *be)
{
	int err;
	long handle;
	struct xenbus_device *dev = be->dev;

	if (be->vif != NULL)
		return;

	err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading handle");
		return;
	}

	be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
	if (IS_ERR(be->vif)) {
		err = PTR_ERR(be->vif);
		be->vif = NULL;
		xenbus_dev_fatal(dev, err, "creating interface");
		return;
	}

	kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
}

static void backend_disconnect(struct backend_info *be)
{
	if (be->vif) {
#ifdef CONFIG_DEBUG_FS
		xenvif_debugfs_delif(be->vif);
#endif /* CONFIG_DEBUG_FS */
		xenvif_disconnect(be->vif);
	}
}

static void backend_connect(struct backend_info *be)
{
	if (be->vif)
		connect(be);
}

static inline void backend_switch_state(struct backend_info *be,
					enum xenbus_state state)
{
	struct xenbus_device *dev = be->dev;

	pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
	be->state = state;

	/* If we are waiting for a hotplug script then defer the
	 * actual xenbus state change.
	 */
	if (!be->have_hotplug_status_watch)
		xenbus_switch_state(dev, state);
}

/* Handle backend state transitions:
 *
 * The backend state starts in InitWait and the following transitions are
 * allowed.
 *
 * InitWait -> Connected
 *
 *    ^    \         |
 *    |     \        |
 *    |      \       |
 *    |       \      |
 *    |        \     |
 *    |         \    |
 *    |          V   V
 *
 *  Closed  <-> Closing
 *
 * The state argument specifies the eventual state of the backend and the
 * function transitions to that state via the shortest path.
 */
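/* For example (derived from the switch below): driving a Connected backend
 * to Closed walks Connected -> Closing -> Closed, with backend_disconnect()
 * called on the first hop; driving a Closed backend to Connected walks
 * Closed -> InitWait -> Connected, reconnecting the vif on the last hop.
 */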
static void set_backend_state(struct backend_info *be,
			      enum xenbus_state state)
{
	while (be->state != state) {
		switch (be->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				pr_info("%s: prepare for reconnect\n",
					be->dev->nodename);
				backend_switch_state(be, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateInitWait:
			switch (state) {
			case XenbusStateConnected:
				backend_connect(be);
				backend_switch_state(be, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				backend_disconnect(be);
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosed);
				break;
			default:
				BUG();
			}
			break;
		default:
			BUG();
		}
	}
}

/**
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));

	be->frontend_state = frontend_state;

	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(be, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		set_backend_state(be, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(be, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(be, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		set_backend_state(be, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}


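/* Parse the optional "rate" node written by the toolstack, formatted as
 * "<bytes>,<usec>".  For example (hypothetical values), "1000000,20000"
 * grants the vif 1000000 bytes of credit every 20000 microseconds.  An
 * absent or malformed node leaves the bandwidth unlimited.
 */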
static void xen_net_read_rate(struct xenbus_device *dev,
			      unsigned long *bytes, unsigned long *usec)
{
	char *s, *e;
	unsigned long b, u;
	char *ratestr;

	/* Default to unlimited bandwidth. */
	*bytes = ~0UL;
	*usec = 0;

	ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
	if (IS_ERR(ratestr))
		return;

	s = ratestr;
	b = simple_strtoul(s, &e, 10);
	if ((s == e) || (*e != ','))
		goto fail;

	s = e + 1;
	u = simple_strtoul(s, &e, 10);
	if ((s == e) || (*e != '\0'))
		goto fail;

	*bytes = b;
	*usec = u;

	kfree(ratestr);
	return;

fail:
	pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
	kfree(ratestr);
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN - 1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e + 1;
	}

	kfree(macstr);
	return 0;
}

static void unregister_hotplug_status_watch(struct backend_info *be)
{
	if (be->have_hotplug_status_watch) {
		unregister_xenbus_watch(&be->hotplug_status_watch);
		kfree(be->hotplug_status_watch.node);
	}
	be->have_hotplug_status_watch = 0;
}

static void hotplug_status_changed(struct xenbus_watch *watch,
				   const char **vec,
				   unsigned int vec_size)
{
	struct backend_info *be = container_of(watch,
					       struct backend_info,
					       hotplug_status_watch);
	char *str;
	unsigned int len;

	str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
	if (IS_ERR(str))
		return;
	if (len == sizeof("connected") - 1 && !memcmp(str, "connected", len)) {
		/* Complete any pending state change */
		xenbus_switch_state(be->dev, be->state);

		/* Not interested in this watch anymore. */
		unregister_hotplug_status_watch(be);
	}
	kfree(str);
}

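/* The frontend has reached Connected: read the per-vif settings (MAC
 * address, rate limit, feature flags, requested number of queues) from
 * xenstore and bring up each queue and its rings.
 */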
static void connect(struct backend_info *be)
{
	int err;
	struct xenbus_device *dev = be->dev;
	unsigned long credit_bytes, credit_usec;
	unsigned int queue_index;
	unsigned int requested_num_queues;
	struct xenvif_queue *queue;

	/* Check whether the frontend requested multiple queues
	 * and read the number requested.
	 */
	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "multi-queue-num-queues",
			   "%u", &requested_num_queues);
	if (err < 0) {
		requested_num_queues = 1; /* Fall back to single queue */
	} else if (requested_num_queues > xenvif_max_queues) {
		/* buggy or malicious guest */
		xenbus_dev_fatal(dev, err,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenvif_max_queues);
		return;
	}

	err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		return;
	}

	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
	read_xenbus_vif_flags(be);

	/* Use the number of queues requested by the frontend */
	be->vif->queues = vzalloc(requested_num_queues *
				  sizeof(struct xenvif_queue));
	if (!be->vif->queues) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating queues");
		return;
	}
	be->vif->num_queues = requested_num_queues;

	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
		queue = &be->vif->queues[queue_index];
		queue->vif = be->vif;
		queue->id = queue_index;
		snprintf(queue->name, sizeof(queue->name), "%s-q%u",
			 be->vif->dev->name, queue->id);

		err = xenvif_init_queue(queue);
		if (err) {
			/* xenvif_init_queue() cleans up after itself on
			 * failure, but we need to clean up any previously
			 * initialised queues. Set num_queues to queue_index
			 * so that earlier queues can be destroyed using the
			 * regular disconnect logic.
			 */
			be->vif->num_queues = queue_index;
			goto err;
		}

		queue->remaining_credit = credit_bytes;

		err = connect_rings(be, queue);
		if (err) {
			/* connect_rings() cleans up after itself on failure,
			 * but we need to clean up after xenvif_init_queue()
			 * here, and also clean up any previously initialised
			 * queues.
			 */
			xenvif_deinit_queue(queue);
			be->vif->num_queues = queue_index;
			goto err;
		}
#ifdef CONFIG_DEBUG_FS
		xenvif_debugfs_addif(queue);
#endif /* CONFIG_DEBUG_FS */
	}

	/* Initialisation completed, tell core driver the number of
	 * active queues.
	 */
	rtnl_lock();
	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
	rtnl_unlock();

	xenvif_carrier_on(be->vif);

	unregister_hotplug_status_watch(be);
	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
				   hotplug_status_changed,
				   "%s/%s", dev->nodename, "hotplug-status");
	if (!err)
		be->have_hotplug_status_watch = 1;

	netif_tx_wake_all_queues(be->vif->dev);

	return;

err:
	if (be->vif->num_queues > 0)
		xenvif_disconnect(be->vif); /* Clean up existing queues */
	vfree(be->vif->queues);
	be->vif->queues = NULL;
	be->vif->num_queues = 0;
}


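/* Read one queue's ring references and event channel(s) from the frontend's
 * xenstore area and map them into this backend.
 */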
static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
{
	struct xenbus_device *dev = be->dev;
	unsigned int num_queues = queue->vif->num_queues;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int tx_evtchn, rx_evtchn;
	int err;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */

	/* If the frontend requested 1 queue, or we have fallen back
	 * to single queue due to lack of frontend support for multi-
	 * queue, expect the remaining XenStore keys in the toplevel
	 * directory. Otherwise, expect them in a subdirectory called
	 * queue-N.
	 */
	if (num_queues == 1) {
		xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
		strcpy(xspath, dev->otherend);
	} else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kzalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
		snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
			 queue->id);
	}

	err = xenbus_gather(XBT_NIL, xspath,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref",
				 xspath);
		goto err;
	}

	/* Try split event channels first, then single event channel. */
	err = xenbus_gather(XBT_NIL, xspath,
			    "event-channel-tx", "%u", &tx_evtchn,
			    "event-channel-rx", "%u", &rx_evtchn, NULL);
	if (err < 0) {
		err = xenbus_scanf(XBT_NIL, xspath,
				   "event-channel", "%u", &tx_evtchn);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "reading %s/event-channel(-tx/rx)",
					 xspath);
			goto err;
		}
		rx_evtchn = tx_evtchn;
	}

	/* Map the shared frame, irq etc. */
	err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
			     tx_evtchn, rx_evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port tx %u rx %u",
				 tx_ring_ref, rx_ring_ref,
				 tx_evtchn, rx_evtchn);
		goto err;
	}

	err = 0;
err: /* Regular return falls through with err == 0 */
	kfree(xspath);
	return err;
}

static int read_xenbus_vif_flags(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned int rx_copy;
	int err, val;

	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (vif->dev->tx_queue_len != 0) {
		if (xenbus_scanf(XBT_NIL, dev->otherend,
				 "feature-rx-notify", "%d", &val) < 0)
			val = 0;
		if (val)
			vif->can_queue = 1;
		else
			/* Must be non-zero for pfifo_fast to work. */
			vif->dev->tx_queue_len = 1;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
			 "%d", &val) < 0)
		val = 0;
	vif->can_sg = !!val;

	vif->gso_mask = 0;
	vif->gso_prefix_mask = 0;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= GSO_BIT(TCPV4);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= GSO_BIT(TCPV4);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= GSO_BIT(TCPV6);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= GSO_BIT(TCPV6);

	if (vif->gso_mask & vif->gso_prefix_mask) {
		xenbus_dev_fatal(dev, err,
				 "%s: gso and gso prefix flags are not mutually exclusive",
				 dev->otherend);
		return -EOPNOTSUPP;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->ip_csum = !val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->ipv6_csum = !!val;

	return 0;
}


/* ** Driver Registration ** */


static const struct xenbus_device_id netback_ids[] = {
	{ "vif" },
	{ "" }
};


static DEFINE_XENBUS_DRIVER(netback, ,
	.probe = netback_probe,
	.remove = netback_remove,
	.uevent = netback_uevent,
	.otherend_changed = frontend_changed,
);

int xenvif_xenbus_init(void)
{
	return xenbus_register_backend(&netback_driver);
}

void xenvif_xenbus_fini(void)
{
	return xenbus_unregister_driver(&netback_driver);
}