/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as to
 * increase the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb)
{
        skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
        atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
        atomic_dec(&queue->inflight_packets);

        /* Wake the dealloc thread _after_ decrementing inflight_packets so
         * that if kthread_stop() has already been called, the dealloc thread
         * does not wait forever with nothing to wake it.
         */
        wake_up(&queue->dealloc_wq);
}
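/* The interrupt handlers below follow the Xen lateeoi model: each handler
 * first marks an EOI as pending in queue->eoi_pending and then looks for
 * real work. If none is found, the pending bit is cleared again and the
 * event is acknowledged straight away with
 * xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS), so a frontend raising
 * spurious events cannot keep the backend busy; otherwise the EOI is
 * deferred until the queued work has been processed elsewhere in the
 * driver.
 */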
static int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) &&
                test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
                !vif->disabled;
}

static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
        bool rc;

        rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
        if (rc)
                napi_schedule(&queue->napi);
        return rc;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;
        int old;

        old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
        WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

        if (!xenvif_handle_tx_interrupt(queue)) {
                atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
                xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
        }

        return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* This vif is rogue; we pretend there is nothing to do
         * for this vif to deschedule it from NAPI. But this interface
         * will be turned off in thread context later.
         */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                /* If the queue is rate-limited, it shall be
                 * rescheduled in the timer callback.
                 */
                if (likely(!queue->rate_limited))
                        xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}

static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
        bool rc;

        rc = xenvif_have_rx_work(queue, false);
        if (rc)
                xenvif_kick_thread(queue);
        return rc;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;
        int old;

        old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
        WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

        if (!xenvif_handle_rx_interrupt(queue)) {
                atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
                xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
        }

        return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;
        int old;
        bool has_rx, has_tx;

        old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
        WARN(old, "Interrupt while EOI pending\n");

        has_tx = xenvif_handle_tx_interrupt(queue);
        has_rx = xenvif_handle_rx_interrupt(queue);

        if (!has_rx && !has_tx) {
                atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
                xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
        }

        return IRQ_HANDLED;
}

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
                               struct net_device *sb_dev)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;
        unsigned int num_queues;

        /* If queues are not set up internally, always return 0,
         * as the packet is going to be dropped anyway.
         */
        num_queues = READ_ONCE(vif->num_queues);
        if (num_queues < 1)
                return 0;

        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                return netdev_pick_tx(dev, skb, NULL) %
                       dev->real_num_tx_queues;

        xenvif_set_skb_hash(vif, skb);

        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

        return vif->hash.mapping[vif->hash.mapping_sel]
                                [skb_get_hash_raw(skb) % size];
}
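/* The backend's transmit path is the guest's receive path: ndo_start_xmit
 * merely stamps the skb with an expiry time (vif->drain_timeout), appends
 * it to the per-queue internal rx_queue and kicks the guest-rx kthread,
 * which pushes it to the frontend through the shared ring.
 */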
static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues;
        u16 index;
        struct xenvif_rx_cb *cb;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up.
         * This handler should be called inside an RCU read section
         * so we don't need to enter it here explicitly.
         */
        num_queues = READ_ONCE(vif->num_queues);
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
                struct ethhdr *eth = (struct ethhdr *)skb->data;

                if (!xenvif_mcast_match(vif, eth->h_dest))
                        goto drop;
        }

        cb = XENVIF_RX_CB(skb);
        cb->expires = jiffies + vif->drain_timeout;

        /* If there is no hash algorithm configured then make sure there
         * is no hash information in the socket buffer otherwise it
         * would be incorrectly forwarded to the frontend.
         */
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                skb_clear_hash(skb);

        if (!xenvif_rx_queue_tail(queue, skb))
                goto drop;

        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues;
        u64 rx_bytes = 0;
        u64 rx_packets = 0;
        u64 tx_bytes = 0;
        u64 tx_packets = 0;
        unsigned int index;

        rcu_read_lock();
        num_queues = READ_ONCE(vif->num_queues);

        /* Aggregate tx and rx stats from each queue */
        for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
                tx_bytes += queue->stats.tx_bytes;
                tx_packets += queue->stats.tx_packets;
        }

        rcu_read_unlock();

        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
        vif->dev->stats.tx_packets = tx_packets;

        return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_enable(&queue->napi);
                enable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        enable_irq(queue->rx_irq);
                xenvif_napi_schedule_or_enable_events(queue);
        }
}

static void xenvif_down(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                disable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        disable_irq(queue->rx_irq);
                napi_disable(&queue->napi);
                del_timer_sync(&queue->credit_timeout);
        }
}
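/* ndo_open/ndo_stop only toggle the datapath (NAPI, IRQs, queue wakeups).
 * The connection to the frontend is established separately through the
 * xenbus state machine, hence the VIF_STATUS_CONNECTED checks below.
 */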
static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_up(vif);
        netif_tx_start_all_queues(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_down(vif);
        netif_tx_stop_all_queues(dev);
        return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif_stats, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif_stats, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif_stats, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAG slots. You should use
         * a guest with the same MAX_SKB_FRAGS value as the backend.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif_stats, tx_frag_overflow)
        },
};
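/* Ethtool statistics are kept per queue; the handlers below sum them over
 * all queues present at the time of the call, reading vif->num_queues
 * under RCU because queues may be torn down concurrently.
 */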
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int num_queues;
        int i;
        unsigned int queue_index;

        rcu_read_lock();
        num_queues = READ_ONCE(vif->num_queues);

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;
                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                        void *vif_stats = &vif->queues[queue_index].stats;
                        accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
                }
                data[i] = accum;
        }

        rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_select_queue = xenvif_select_queue,
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        static const u8 dummy_addr[ETH_ALEN] = {
                0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
        };
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
         * via netif_set_real_num_*_queues().
         */
        dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
                              ether_setup, xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;
        vif->disabled = false;
        vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
        vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

        /* Start out with no queues. */
        vif->queues = NULL;
        vif->num_queues = 0;

        vif->xdp_headroom = 0;

        spin_lock_init(&vif->lock);
        INIT_LIST_HEAD(&vif->fe_mcast_addr);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        dev->ethtool_ops = &xenvif_ethtool_ops;

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        eth_hw_addr_set(dev, dummy_addr);

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}
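/* Set up the per-queue state: credit-based rate-limiting bookkeeping, the
 * internal rx/tx queues, the pending ring tracking outstanding tx
 * requests, and the zerocopy callback descriptors for mapped tx buffers.
 */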
int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec = 0UL;
        timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
        queue->credit_window_start = get_jiffies_64();

        queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; ++i)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long-term solution would be to use just
         * a bunch of valid page descriptors, without dependency on ballooning.
         */
        err = gnttab_alloc_pages(MAX_PENDING_REQS,
                                 queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++) {
                queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
                        { { .callback = xenvif_zerocopy_callback },
                          { { .ctx = NULL,
                              .desc = i } } };
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        return 0;
}
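/* Called once the frontend has connected: clamp the MTU if scatter-gather
 * is not available, recompute the feature set, and bring the datapath up
 * if the device is already administratively open.
 */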
void xenvif_carrier_on(struct xenvif *vif)
{
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        set_bit(VIF_STATUS_CONNECTED, &vif->status);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
}

int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
                        unsigned int evtchn)
{
        struct net_device *dev = vif->dev;
        struct xenbus_device *xendev = xenvif_to_xenbus_device(vif);
        void *addr;
        struct xen_netif_ctrl_sring *shared;
        RING_IDX rsp_prod, req_prod;
        int err;

        err = xenbus_map_ring_valloc(xendev, &ring_ref, 1, &addr);
        if (err)
                goto err;

        shared = (struct xen_netif_ctrl_sring *)addr;
        rsp_prod = READ_ONCE(shared->rsp_prod);
        req_prod = READ_ONCE(shared->req_prod);

        BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

        err = -EIO;
        if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
                goto err_unmap;

        err = bind_interdomain_evtchn_to_irq_lateeoi(xendev, evtchn);
        if (err < 0)
                goto err_unmap;

        vif->ctrl_irq = err;

        xenvif_init_hash(vif);

        err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
                                   IRQF_ONESHOT, "xen-netback-ctrl", vif);
        if (err) {
                pr_warn("Could not setup irq handler for %s\n", dev->name);
                goto err_deinit;
        }

        return 0;

err_deinit:
        xenvif_deinit_hash(vif);
        unbind_from_irqhandler(vif->ctrl_irq, vif);
        vif->ctrl_irq = 0;

err_unmap:
        xenbus_unmap_ring_vfree(xendev, vif->ctrl.sring);
        vif->ctrl.sring = NULL;

err:
        return err;
}

static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
        if (queue->task) {
                kthread_stop(queue->task);
                put_task_struct(queue->task);
                queue->task = NULL;
        }

        if (queue->dealloc_task) {
                kthread_stop(queue->dealloc_task);
                queue->dealloc_task = NULL;
        }

        if (queue->napi.poll) {
                netif_napi_del(&queue->napi);
                queue->napi.poll = NULL;
        }

        if (queue->tx_irq) {
                unbind_from_irqhandler(queue->tx_irq, queue);
                if (queue->tx_irq == queue->rx_irq)
                        queue->rx_irq = 0;
                queue->tx_irq = 0;
        }

        if (queue->rx_irq) {
                unbind_from_irqhandler(queue->rx_irq, queue);
                queue->rx_irq = 0;
        }

        xenvif_unmap_frontend_data_rings(queue);
}
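/* Connect the data path of a single queue. With
 * feature-split-event-channels the frontend supplies separate tx and rx
 * event channels, each bound to its own dedicated handler; otherwise one
 * shared channel is bound to xenvif_interrupt(), which checks both
 * directions.
 */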
int xenvif_connect_data(struct xenvif_queue *queue,
                        unsigned long tx_ring_ref,
                        unsigned long rx_ring_ref,
                        unsigned int tx_evtchn,
                        unsigned int rx_evtchn)
{
        struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
        struct task_struct *task;
        int err;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
                                             rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);

        queue->stalled = true;

        task = kthread_run(xenvif_kthread_guest_rx, queue,
                           "%s-guest-rx", queue->name);
        if (IS_ERR(task))
                goto kthread_err;
        queue->task = task;
        /*
         * Take a reference to the task in order to prevent it from being freed
         * if the thread function returns before kthread_stop is called.
         */
        get_task_struct(task);

        task = kthread_run(xenvif_dealloc_kthread, queue,
                           "%s-dealloc", queue->name);
        if (IS_ERR(task))
                goto kthread_err;
        queue->dealloc_task = task;

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                        dev, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
                        goto err;
                queue->tx_irq = queue->rx_irq = err;
                disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                        dev, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
                        goto err;
                queue->tx_irq = err;
                disable_irq(queue->tx_irq);

                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                        dev, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
                        goto err;
                queue->rx_irq = err;
                disable_irq(queue->rx_irq);
        }

        return 0;

kthread_err:
        pr_warn("Could not allocate kthread for %s\n", queue->name);
        err = PTR_ERR(task);
err:
        xenvif_disconnect_queue(queue);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
                netif_carrier_off(dev); /* discard queued packets */
                if (netif_running(dev))
                        xenvif_down(vif);
        }
        rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        xenvif_carrier_off(vif);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];

                xenvif_disconnect_queue(queue);
        }

        xenvif_mcast_addr_list_free(vif);
}
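/* Tear down the control ring. The guards below make this safe to call
 * even if the ring was never (fully) connected: the irq and the shared
 * ring pointer are cleared once released.
 */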
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
        if (vif->ctrl_irq) {
                xenvif_deinit_hash(vif);
                unbind_from_irqhandler(vif->ctrl_irq, vif);
                vif->ctrl_irq = 0;
        }

        if (vif->ctrl.sring) {
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                        vif->ctrl.sring);
                vif->ctrl.sring = NULL;
        }
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
        gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
        struct xenvif_queue *queues = vif->queues;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        unregister_netdev(vif->dev);
        free_netdev(vif->dev);

        for (queue_index = 0; queue_index < num_queues; ++queue_index)
                xenvif_deinit_queue(&queues[queue_index]);
        vfree(queues);

        module_put(THIS_MODULE);
}