1 /* 2 * Xenbus code for netif backend 3 * 4 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au> 5 * Copyright (C) 2005 XenSource Ltd 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 21 #include "common.h" 22 #include <linux/vmalloc.h> 23 #include <linux/rtnetlink.h> 24 25 struct backend_info { 26 struct xenbus_device *dev; 27 struct xenvif *vif; 28 29 /* This is the state that will be reflected in xenstore when any 30 * active hotplug script completes. 31 */ 32 enum xenbus_state state; 33 34 enum xenbus_state frontend_state; 35 struct xenbus_watch hotplug_status_watch; 36 u8 have_hotplug_status_watch:1; 37 }; 38 39 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); 40 static void connect(struct backend_info *be); 41 static int read_xenbus_vif_flags(struct backend_info *be); 42 static void backend_create_xenvif(struct backend_info *be); 43 static void unregister_hotplug_status_watch(struct backend_info *be); 44 static void set_backend_state(struct backend_info *be, 45 enum xenbus_state state); 46 47 static int netback_remove(struct xenbus_device *dev) 48 { 49 struct backend_info *be = dev_get_drvdata(&dev->dev); 50 51 set_backend_state(be, XenbusStateClosed); 52 53 unregister_hotplug_status_watch(be); 54 if (be->vif) { 55 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 56 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 57 xenvif_free(be->vif); 58 be->vif = NULL; 59 } 60 kfree(be); 61 dev_set_drvdata(&dev->dev, NULL); 62 return 0; 63 } 64 65 66 /** 67 * Entry point to this code when a new device is created. Allocate the basic 68 * structures and switch to InitWait. 69 */ 70 static int netback_probe(struct xenbus_device *dev, 71 const struct xenbus_device_id *id) 72 { 73 const char *message; 74 struct xenbus_transaction xbt; 75 int err; 76 int sg; 77 struct backend_info *be = kzalloc(sizeof(struct backend_info), 78 GFP_KERNEL); 79 if (!be) { 80 xenbus_dev_fatal(dev, -ENOMEM, 81 "allocating backend structure"); 82 return -ENOMEM; 83 } 84 85 be->dev = dev; 86 dev_set_drvdata(&dev->dev, be); 87 88 sg = 1; 89 90 do { 91 err = xenbus_transaction_start(&xbt); 92 if (err) { 93 xenbus_dev_fatal(dev, err, "starting transaction"); 94 goto fail; 95 } 96 97 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg); 98 if (err) { 99 message = "writing feature-sg"; 100 goto abort_transaction; 101 } 102 103 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", 104 "%d", sg); 105 if (err) { 106 message = "writing feature-gso-tcpv4"; 107 goto abort_transaction; 108 } 109 110 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6", 111 "%d", sg); 112 if (err) { 113 message = "writing feature-gso-tcpv6"; 114 goto abort_transaction; 115 } 116 117 /* We support partial checksum setup for IPv6 packets */ 118 err = xenbus_printf(xbt, dev->nodename, 119 "feature-ipv6-csum-offload", 120 "%d", 1); 121 if (err) { 122 message = "writing feature-ipv6-csum-offload"; 123 goto abort_transaction; 124 } 125 126 /* We support rx-copy path. */ 127 err = xenbus_printf(xbt, dev->nodename, 128 "feature-rx-copy", "%d", 1); 129 if (err) { 130 message = "writing feature-rx-copy"; 131 goto abort_transaction; 132 } 133 134 /* 135 * We don't support rx-flip path (except old guests who don't 136 * grok this feature flag). 137 */ 138 err = xenbus_printf(xbt, dev->nodename, 139 "feature-rx-flip", "%d", 0); 140 if (err) { 141 message = "writing feature-rx-flip"; 142 goto abort_transaction; 143 } 144 145 err = xenbus_transaction_end(xbt, 0); 146 } while (err == -EAGAIN); 147 148 if (err) { 149 xenbus_dev_fatal(dev, err, "completing transaction"); 150 goto fail; 151 } 152 153 /* 154 * Split event channels support, this is optional so it is not 155 * put inside the above loop. 156 */ 157 err = xenbus_printf(XBT_NIL, dev->nodename, 158 "feature-split-event-channels", 159 "%u", separate_tx_rx_irq); 160 if (err) 161 pr_debug("Error writing feature-split-event-channels\n"); 162 163 /* Multi-queue support: This is an optional feature. */ 164 err = xenbus_printf(XBT_NIL, dev->nodename, 165 "multi-queue-max-queues", "%u", xenvif_max_queues); 166 if (err) 167 pr_debug("Error writing multi-queue-max-queues\n"); 168 169 err = xenbus_switch_state(dev, XenbusStateInitWait); 170 if (err) 171 goto fail; 172 173 be->state = XenbusStateInitWait; 174 175 /* This kicks hotplug scripts, so do it immediately. */ 176 backend_create_xenvif(be); 177 178 return 0; 179 180 abort_transaction: 181 xenbus_transaction_end(xbt, 1); 182 xenbus_dev_fatal(dev, err, "%s", message); 183 fail: 184 pr_debug("failed\n"); 185 netback_remove(dev); 186 return err; 187 } 188 189 190 /* 191 * Handle the creation of the hotplug script environment. We add the script 192 * and vif variables to the environment, for the benefit of the vif-* hotplug 193 * scripts. 194 */ 195 static int netback_uevent(struct xenbus_device *xdev, 196 struct kobj_uevent_env *env) 197 { 198 struct backend_info *be = dev_get_drvdata(&xdev->dev); 199 char *val; 200 201 val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); 202 if (IS_ERR(val)) { 203 int err = PTR_ERR(val); 204 xenbus_dev_fatal(xdev, err, "reading script"); 205 return err; 206 } else { 207 if (add_uevent_var(env, "script=%s", val)) { 208 kfree(val); 209 return -ENOMEM; 210 } 211 kfree(val); 212 } 213 214 if (!be || !be->vif) 215 return 0; 216 217 return add_uevent_var(env, "vif=%s", be->vif->dev->name); 218 } 219 220 221 static void backend_create_xenvif(struct backend_info *be) 222 { 223 int err; 224 long handle; 225 struct xenbus_device *dev = be->dev; 226 227 if (be->vif != NULL) 228 return; 229 230 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle); 231 if (err != 1) { 232 xenbus_dev_fatal(dev, err, "reading handle"); 233 return; 234 } 235 236 be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); 237 if (IS_ERR(be->vif)) { 238 err = PTR_ERR(be->vif); 239 be->vif = NULL; 240 xenbus_dev_fatal(dev, err, "creating interface"); 241 return; 242 } 243 244 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); 245 } 246 247 static void backend_disconnect(struct backend_info *be) 248 { 249 if (be->vif) 250 xenvif_disconnect(be->vif); 251 } 252 253 static void backend_connect(struct backend_info *be) 254 { 255 if (be->vif) 256 connect(be); 257 } 258 259 static inline void backend_switch_state(struct backend_info *be, 260 enum xenbus_state state) 261 { 262 struct xenbus_device *dev = be->dev; 263 264 pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state)); 265 be->state = state; 266 267 /* If we are waiting for a hotplug script then defer the 268 * actual xenbus state change. 269 */ 270 if (!be->have_hotplug_status_watch) 271 xenbus_switch_state(dev, state); 272 } 273 274 /* Handle backend state transitions: 275 * 276 * The backend state starts in InitWait and the following transitions are 277 * allowed. 278 * 279 * InitWait -> Connected 280 * 281 * ^ \ | 282 * | \ | 283 * | \ | 284 * | \ | 285 * | \ | 286 * | \ | 287 * | V V 288 * 289 * Closed <-> Closing 290 * 291 * The state argument specifies the eventual state of the backend and the 292 * function transitions to that state via the shortest path. 293 */ 294 static void set_backend_state(struct backend_info *be, 295 enum xenbus_state state) 296 { 297 while (be->state != state) { 298 switch (be->state) { 299 case XenbusStateClosed: 300 switch (state) { 301 case XenbusStateInitWait: 302 case XenbusStateConnected: 303 pr_info("%s: prepare for reconnect\n", 304 be->dev->nodename); 305 backend_switch_state(be, XenbusStateInitWait); 306 break; 307 case XenbusStateClosing: 308 backend_switch_state(be, XenbusStateClosing); 309 break; 310 default: 311 BUG(); 312 } 313 break; 314 case XenbusStateInitWait: 315 switch (state) { 316 case XenbusStateConnected: 317 backend_connect(be); 318 backend_switch_state(be, XenbusStateConnected); 319 break; 320 case XenbusStateClosing: 321 case XenbusStateClosed: 322 backend_switch_state(be, XenbusStateClosing); 323 break; 324 default: 325 BUG(); 326 } 327 break; 328 case XenbusStateConnected: 329 switch (state) { 330 case XenbusStateInitWait: 331 case XenbusStateClosing: 332 case XenbusStateClosed: 333 backend_disconnect(be); 334 backend_switch_state(be, XenbusStateClosing); 335 break; 336 default: 337 BUG(); 338 } 339 break; 340 case XenbusStateClosing: 341 switch (state) { 342 case XenbusStateInitWait: 343 case XenbusStateConnected: 344 case XenbusStateClosed: 345 backend_switch_state(be, XenbusStateClosed); 346 break; 347 default: 348 BUG(); 349 } 350 break; 351 default: 352 BUG(); 353 } 354 } 355 } 356 357 /** 358 * Callback received when the frontend's state changes. 359 */ 360 static void frontend_changed(struct xenbus_device *dev, 361 enum xenbus_state frontend_state) 362 { 363 struct backend_info *be = dev_get_drvdata(&dev->dev); 364 365 pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state)); 366 367 be->frontend_state = frontend_state; 368 369 switch (frontend_state) { 370 case XenbusStateInitialising: 371 set_backend_state(be, XenbusStateInitWait); 372 break; 373 374 case XenbusStateInitialised: 375 break; 376 377 case XenbusStateConnected: 378 set_backend_state(be, XenbusStateConnected); 379 break; 380 381 case XenbusStateClosing: 382 set_backend_state(be, XenbusStateClosing); 383 break; 384 385 case XenbusStateClosed: 386 set_backend_state(be, XenbusStateClosed); 387 if (xenbus_dev_is_online(dev)) 388 break; 389 /* fall through if not online */ 390 case XenbusStateUnknown: 391 set_backend_state(be, XenbusStateClosed); 392 device_unregister(&dev->dev); 393 break; 394 395 default: 396 xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend", 397 frontend_state); 398 break; 399 } 400 } 401 402 403 static void xen_net_read_rate(struct xenbus_device *dev, 404 unsigned long *bytes, unsigned long *usec) 405 { 406 char *s, *e; 407 unsigned long b, u; 408 char *ratestr; 409 410 /* Default to unlimited bandwidth. */ 411 *bytes = ~0UL; 412 *usec = 0; 413 414 ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL); 415 if (IS_ERR(ratestr)) 416 return; 417 418 s = ratestr; 419 b = simple_strtoul(s, &e, 10); 420 if ((s == e) || (*e != ',')) 421 goto fail; 422 423 s = e + 1; 424 u = simple_strtoul(s, &e, 10); 425 if ((s == e) || (*e != '\0')) 426 goto fail; 427 428 *bytes = b; 429 *usec = u; 430 431 kfree(ratestr); 432 return; 433 434 fail: 435 pr_warn("Failed to parse network rate limit. Traffic unlimited.\n"); 436 kfree(ratestr); 437 } 438 439 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) 440 { 441 char *s, *e, *macstr; 442 int i; 443 444 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); 445 if (IS_ERR(macstr)) 446 return PTR_ERR(macstr); 447 448 for (i = 0; i < ETH_ALEN; i++) { 449 mac[i] = simple_strtoul(s, &e, 16); 450 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { 451 kfree(macstr); 452 return -ENOENT; 453 } 454 s = e+1; 455 } 456 457 kfree(macstr); 458 return 0; 459 } 460 461 static void unregister_hotplug_status_watch(struct backend_info *be) 462 { 463 if (be->have_hotplug_status_watch) { 464 unregister_xenbus_watch(&be->hotplug_status_watch); 465 kfree(be->hotplug_status_watch.node); 466 } 467 be->have_hotplug_status_watch = 0; 468 } 469 470 static void hotplug_status_changed(struct xenbus_watch *watch, 471 const char **vec, 472 unsigned int vec_size) 473 { 474 struct backend_info *be = container_of(watch, 475 struct backend_info, 476 hotplug_status_watch); 477 char *str; 478 unsigned int len; 479 480 str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len); 481 if (IS_ERR(str)) 482 return; 483 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { 484 /* Complete any pending state change */ 485 xenbus_switch_state(be->dev, be->state); 486 487 /* Not interested in this watch anymore. */ 488 unregister_hotplug_status_watch(be); 489 } 490 kfree(str); 491 } 492 493 static void connect(struct backend_info *be) 494 { 495 int err; 496 struct xenbus_device *dev = be->dev; 497 unsigned long credit_bytes, credit_usec; 498 unsigned int queue_index; 499 unsigned int requested_num_queues; 500 struct xenvif_queue *queue; 501 502 /* Check whether the frontend requested multiple queues 503 * and read the number requested. 504 */ 505 err = xenbus_scanf(XBT_NIL, dev->otherend, 506 "multi-queue-num-queues", 507 "%u", &requested_num_queues); 508 if (err < 0) { 509 requested_num_queues = 1; /* Fall back to single queue */ 510 } else if (requested_num_queues > xenvif_max_queues) { 511 /* buggy or malicious guest */ 512 xenbus_dev_fatal(dev, err, 513 "guest requested %u queues, exceeding the maximum of %u.", 514 requested_num_queues, xenvif_max_queues); 515 return; 516 } 517 518 err = xen_net_read_mac(dev, be->vif->fe_dev_addr); 519 if (err) { 520 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 521 return; 522 } 523 524 xen_net_read_rate(dev, &credit_bytes, &credit_usec); 525 read_xenbus_vif_flags(be); 526 527 /* Use the number of queues requested by the frontend */ 528 be->vif->queues = vzalloc(requested_num_queues * 529 sizeof(struct xenvif_queue)); 530 be->vif->num_queues = requested_num_queues; 531 532 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) { 533 queue = &be->vif->queues[queue_index]; 534 queue->vif = be->vif; 535 queue->id = queue_index; 536 snprintf(queue->name, sizeof(queue->name), "%s-q%u", 537 be->vif->dev->name, queue->id); 538 539 err = xenvif_init_queue(queue); 540 if (err) { 541 /* xenvif_init_queue() cleans up after itself on 542 * failure, but we need to clean up any previously 543 * initialised queues. Set num_queues to i so that 544 * earlier queues can be destroyed using the regular 545 * disconnect logic. 546 */ 547 be->vif->num_queues = queue_index; 548 goto err; 549 } 550 551 queue->remaining_credit = credit_bytes; 552 553 err = connect_rings(be, queue); 554 if (err) { 555 /* connect_rings() cleans up after itself on failure, 556 * but we need to clean up after xenvif_init_queue() here, 557 * and also clean up any previously initialised queues. 558 */ 559 xenvif_deinit_queue(queue); 560 be->vif->num_queues = queue_index; 561 goto err; 562 } 563 } 564 565 /* Initialisation completed, tell core driver the number of 566 * active queues. 567 */ 568 rtnl_lock(); 569 netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues); 570 netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues); 571 rtnl_unlock(); 572 573 xenvif_carrier_on(be->vif); 574 575 unregister_hotplug_status_watch(be); 576 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 577 hotplug_status_changed, 578 "%s/%s", dev->nodename, "hotplug-status"); 579 if (!err) 580 be->have_hotplug_status_watch = 1; 581 582 netif_tx_wake_all_queues(be->vif->dev); 583 584 return; 585 586 err: 587 if (be->vif->num_queues > 0) 588 xenvif_disconnect(be->vif); /* Clean up existing queues */ 589 vfree(be->vif->queues); 590 be->vif->queues = NULL; 591 be->vif->num_queues = 0; 592 return; 593 } 594 595 596 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue) 597 { 598 struct xenbus_device *dev = be->dev; 599 unsigned int num_queues = queue->vif->num_queues; 600 unsigned long tx_ring_ref, rx_ring_ref; 601 unsigned int tx_evtchn, rx_evtchn; 602 int err; 603 char *xspath; 604 size_t xspathsize; 605 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */ 606 607 /* If the frontend requested 1 queue, or we have fallen back 608 * to single queue due to lack of frontend support for multi- 609 * queue, expect the remaining XenStore keys in the toplevel 610 * directory. Otherwise, expect them in a subdirectory called 611 * queue-N. 612 */ 613 if (num_queues == 1) { 614 xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL); 615 if (!xspath) { 616 xenbus_dev_fatal(dev, -ENOMEM, 617 "reading ring references"); 618 return -ENOMEM; 619 } 620 strcpy(xspath, dev->otherend); 621 } else { 622 xspathsize = strlen(dev->otherend) + xenstore_path_ext_size; 623 xspath = kzalloc(xspathsize, GFP_KERNEL); 624 if (!xspath) { 625 xenbus_dev_fatal(dev, -ENOMEM, 626 "reading ring references"); 627 return -ENOMEM; 628 } 629 snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, 630 queue->id); 631 } 632 633 err = xenbus_gather(XBT_NIL, xspath, 634 "tx-ring-ref", "%lu", &tx_ring_ref, 635 "rx-ring-ref", "%lu", &rx_ring_ref, NULL); 636 if (err) { 637 xenbus_dev_fatal(dev, err, 638 "reading %s/ring-ref", 639 xspath); 640 goto err; 641 } 642 643 /* Try split event channels first, then single event channel. */ 644 err = xenbus_gather(XBT_NIL, xspath, 645 "event-channel-tx", "%u", &tx_evtchn, 646 "event-channel-rx", "%u", &rx_evtchn, NULL); 647 if (err < 0) { 648 err = xenbus_scanf(XBT_NIL, xspath, 649 "event-channel", "%u", &tx_evtchn); 650 if (err < 0) { 651 xenbus_dev_fatal(dev, err, 652 "reading %s/event-channel(-tx/rx)", 653 xspath); 654 goto err; 655 } 656 rx_evtchn = tx_evtchn; 657 } 658 659 /* Map the shared frame, irq etc. */ 660 err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref, 661 tx_evtchn, rx_evtchn); 662 if (err) { 663 xenbus_dev_fatal(dev, err, 664 "mapping shared-frames %lu/%lu port tx %u rx %u", 665 tx_ring_ref, rx_ring_ref, 666 tx_evtchn, rx_evtchn); 667 goto err; 668 } 669 670 err = 0; 671 err: /* Regular return falls through with err == 0 */ 672 kfree(xspath); 673 return err; 674 } 675 676 static int read_xenbus_vif_flags(struct backend_info *be) 677 { 678 struct xenvif *vif = be->vif; 679 struct xenbus_device *dev = be->dev; 680 unsigned int rx_copy; 681 int err, val; 682 683 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", 684 &rx_copy); 685 if (err == -ENOENT) { 686 err = 0; 687 rx_copy = 0; 688 } 689 if (err < 0) { 690 xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy", 691 dev->otherend); 692 return err; 693 } 694 if (!rx_copy) 695 return -EOPNOTSUPP; 696 697 if (vif->dev->tx_queue_len != 0) { 698 if (xenbus_scanf(XBT_NIL, dev->otherend, 699 "feature-rx-notify", "%d", &val) < 0) 700 val = 0; 701 if (val) 702 vif->can_queue = 1; 703 else 704 /* Must be non-zero for pfifo_fast to work. */ 705 vif->dev->tx_queue_len = 1; 706 } 707 708 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", 709 "%d", &val) < 0) 710 val = 0; 711 vif->can_sg = !!val; 712 713 vif->gso_mask = 0; 714 vif->gso_prefix_mask = 0; 715 716 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", 717 "%d", &val) < 0) 718 val = 0; 719 if (val) 720 vif->gso_mask |= GSO_BIT(TCPV4); 721 722 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix", 723 "%d", &val) < 0) 724 val = 0; 725 if (val) 726 vif->gso_prefix_mask |= GSO_BIT(TCPV4); 727 728 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6", 729 "%d", &val) < 0) 730 val = 0; 731 if (val) 732 vif->gso_mask |= GSO_BIT(TCPV6); 733 734 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix", 735 "%d", &val) < 0) 736 val = 0; 737 if (val) 738 vif->gso_prefix_mask |= GSO_BIT(TCPV6); 739 740 if (vif->gso_mask & vif->gso_prefix_mask) { 741 xenbus_dev_fatal(dev, err, 742 "%s: gso and gso prefix flags are not " 743 "mutually exclusive", 744 dev->otherend); 745 return -EOPNOTSUPP; 746 } 747 748 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", 749 "%d", &val) < 0) 750 val = 0; 751 vif->ip_csum = !val; 752 753 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload", 754 "%d", &val) < 0) 755 val = 0; 756 vif->ipv6_csum = !!val; 757 758 return 0; 759 } 760 761 762 /* ** Driver Registration ** */ 763 764 765 static const struct xenbus_device_id netback_ids[] = { 766 { "vif" }, 767 { "" } 768 }; 769 770 771 static DEFINE_XENBUS_DRIVER(netback, , 772 .probe = netback_probe, 773 .remove = netback_remove, 774 .uevent = netback_uevent, 775 .otherend_changed = frontend_changed, 776 ); 777 778 int xenvif_xenbus_init(void) 779 { 780 return xenbus_register_backend(&netback_driver); 781 } 782 783 void xenvif_xenbus_fini(void) 784 { 785 return xenbus_unregister_driver(&netback_driver); 786 } 787