/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots a skb can have. If a guest sends
 * a skb which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128
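
/* For example, the split-event-channel and queue-count behaviour can
 * be tuned at module load time (illustrative values):
 *   modprobe xen-netback separate_tx_rx_irq=0 max_queues=4
 */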

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8 st);

static inline int tx_work_todo(struct xenvif_queue *queue);

static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);
	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}
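
/* Check whether the frontend has published at least @needed unconsumed
 * Rx requests. If not, arm req_event so the frontend's next request
 * raises an event, then re-check the producer to close the race where
 * requests arrived between the check and the event being armed.
 */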
bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}

void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max)
		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb)
		queue->rx_queue_len -= skb->len;

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
{
	spin_lock_irq(&queue->rx_queue.lock);

	if (queue->rx_queue_len < queue->rx_queue_max)
		netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irq(&queue->rx_queue.lock);
}


static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
	}
}
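
/* Book-keeping for one batch of guest Rx work: {copy,meta}_prod count
 * entries added while building the batch, {copy,meta}_cons count
 * entries consumed while checking the results, and copy_off/copy_gref
 * track the write position within the current guest buffer.
 */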
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}

/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct xenvif_rx_meta *meta;
	unsigned long bytes;
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE << compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		struct xen_page_foreign *foreign;

		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		if (npo->copy_off == MAX_BUFFER_OFFSET)
			meta = get_next_rx_buffer(queue, npo);

		bytes = PAGE_SIZE - offset;
		if (bytes > size)
			bytes = size;

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		copy_gop->len = bytes;

		foreign = xen_page_foreign(page);
		if (foreign) {
			copy_gop->source.domid = foreign->domid;
			copy_gop->source.u.ref = foreign->gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn =
				virt_to_mfn(page_address(page));
		}
		copy_gop->source.offset = offset;

		copy_gop->dest.domid = queue->vif->domid;
		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (skb_is_gso(skb)) {
			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		}

		if (*head && ((1 << gso_type) & queue->vif->gso_mask))
			queue->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */

	}
}
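
/* Worked example for the chunking above (assuming 4 KiB pages and
 * MAX_BUFFER_OFFSET == PAGE_SIZE): a 3000-byte fragment starting at
 * offset 2048 of a compound page is emitted as two copy ops of 2048
 * and 952 bytes, each further clipped to the space remaining in the
 * current guest buffer.
 */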

/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo,
			  struct xenvif_queue *queue)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;

	old_meta_prod = npo->meta_prod;

	gso_type = XEN_NETIF_GSO_TYPE_NONE;
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}

	/* Set up a GSO prefix descriptor, if necessary */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
	}

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(queue, skb, npo,
				     virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(queue, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head);
	}

	return npo->meta_prod - old_meta_prod;
}

/*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}
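
/* Emit Rx responses for the trailing meta slots of a packet; only the
 * final slot is sent without XEN_NETRXF_more_data.
 */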
static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;

		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(queue, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}
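
/* Drain the internal Rx queue into the shared ring: build one batch of
 * grant-copy operations, issue it, then write the matching responses
 * and notify the frontend if it asked for an event.
 */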
static void xenvif_rx_action(struct xenvif_queue *queue)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy = queue->grant_copy_op,
		.meta = queue->meta,
	};

	skb_queue_head_init(&rxq);

	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
		RING_IDX old_req_cons;
		RING_IDX ring_slots_used;

		queue->last_rx_time = jiffies;

		old_req_cons = queue->rx.req_cons;
		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
		ring_slots_used = queue->rx.req_cons - old_req_cons;

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&queue->rx,
						 queue->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = queue->meta[npo.meta_cons].gso_size;
			resp->id = queue->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}


		queue->stats.tx_bytes += skb->len;
		queue->stats.tx_packets++;

		status = xenvif_check_gop(queue->vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
					status, offset,
					queue->meta[npo.meta_cons].size,
					flags);

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&queue->rx,
						  queue->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(queue, status,
					  queue->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(queue->rx_irq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
}

static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	queue->remaining_credit = min(max_credit, max_burst);
}

static void tx_credit_callback(unsigned long data)
{
	struct xenvif_queue *queue = (struct xenvif_queue *)data;

	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}
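
/* Complete a failed Tx packet: send an error response for its first
 * slot and for every further slot up to @end, notifying the frontend
 * as responses are pushed.
 */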
static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;
	unsigned long flags;

	do {
		int notify;

		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (notify)
			notify_remote_via_irq(queue->tx_irq);

		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&queue->tx, cons++);
	} while (1);
	queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->queues)
		xenvif_kick_thread(&vif->queues[0]);
}
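
/* Count the extra slots chained to @first via XEN_NETTXF_more_data,
 * copying them into @txp. Returns the number of extra slots, or a
 * negative error. Protocol violations are fatal and disable the vif;
 * oversized-but-tolerated packets are consumed and dropped.
 */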
static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* Xen network protocol had an implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, cons + slots);
		return drop_err;
	}

	return slots;
}


struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
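
/* Prepare a grant-map operation for one Tx slot and remember the
 * request so the matching response can be sent on completion.
 */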
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);

	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}
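
/* Build grant-map operations for every fragment slot of @skb. Slots
 * beyond MAX_SKB_FRAGS are parked in a freshly allocated frag_list
 * skb. Returns the next free slot in the map-op array, or NULL if the
 * frag_list skb could not be allocated.
 */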
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
							struct sk_buff *skb,
							struct xen_netif_tx_request *txp,
							struct gnttab_map_grant_ref *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	pending_ring_idx_t index;
	unsigned int nr_slots, frag_overflow = 0;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
		shinfo->nr_frags = MAX_SKB_FRAGS;
	}
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
	}

	if (frag_overflow) {
		struct sk_buff *nskb = xenvif_alloc_skb(0);

		if (unlikely(nskb == NULL)) {
			if (net_ratelimit())
				netdev_err(queue->vif->dev,
					   "Can't allocate the frag_list skb.\n");
			return NULL;
		}

		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
		}

		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
}

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: %x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: %x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
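
/* Check the result of the header grant copy and of every fragment
 * grant map for @skb (including its frag_list skb, if any). On the
 * first error all already-mapped fragments are unmapped and released.
 * Returns a nonzero status if any operation failed.
 */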
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
	int i, err;

	/* Check status of header. */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		/* The first frag might still have this slot mapped */
		if (!sharedslot)
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_ERROR);
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);

		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: if the header hasn't shared a slot with the
		 * first frag, release it as well.
		 */
		if (!sharedslot)
			xenvif_idx_release(queue,
					   XENVIF_TX_CB(skb)->pending_idx,
					   XEN_NETIF_RSP_OKAY);

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = skb_shinfo(skb);
		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}
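
/* Replace the pending_idx placeholders in @skb's frags with the real
 * pages, update the skb accounting and chain the zerocopy callback
 * contexts so completion can walk every slot of the packet.
 */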
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous */
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);

		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}

static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			queue->tx.req_cons = ++cons;
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		queue->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}

static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		queue->stats.rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
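
/* Credit-based rate limiting: returns true if @size bytes may not be
 * sent yet. Credit is replenished once per credit window; when a
 * packet exceeds the remaining credit, a timer is armed to retry at
 * the start of the next window.
 */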
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = queue->credit_window_start +
		msecs_to_jiffies(queue->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&queue->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		queue->credit_window_start = now;
		tx_add_credit(queue);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > queue->remaining_credit) {
		queue->credit_timeout.data =
			(unsigned long)queue;
		queue->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&queue->credit_timeout,
			  next_credit);
		queue->credit_window_start = next_credit;

		return true;
	}

	return false;
}
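
/* Consume Tx requests from the shared ring, up to @budget packets:
 * validate each packet, build a grant-copy op for the header area and
 * grant-map ops for the remaining payload, and queue the resulting
 * skbs for xenvif_tx_submit(). The numbers of copy and map ops built
 * are returned through @copy_ops and @map_ops.
 */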
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 int budget,
				 unsigned *copy_ops,
				 unsigned *map_ops)
{
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(queue->vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_mfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data);

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
			       sizeof(txreq));
		}

		queue->pending_cons++;

		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}

/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);
	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;

			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}

	/* Copied all the bits from the frag list -- free it. */
	skb_frag_list_init(skb);
	xenvif_skb_zerocopy_prepare(queue, nskb);
	kfree_skb(nskb);

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}
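
/* Second half of the Tx path: for each skb queued by
 * xenvif_tx_build_gops(), check the grant operation results, attach
 * the mapped fragments, fix up checksum and GSO state and hand the
 * packet to the network stack.
 */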
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
		}

		skb->dev = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}
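
/* Zerocopy completion callback, invoked when the network stack is done
 * with the mapped fragments of a packet. Walks the chained contexts of
 * all slots and queues them on the dealloc ring for unmapping by the
 * dealloc kthread.
 */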
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;

		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	wake_up(&queue->dealloc_wq);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}
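
/* Unmap every grant queued on the dealloc ring since the last run and
 * return the corresponding pending slots to the frontend.
 */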
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop-queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;

		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: %llx handle: %x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}


/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}
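
/* Return a pending slot to the pending ring together with a Tx
 * response carrying @status, and notify the frontend if required.
 */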
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	int notify;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);

	spin_unlock_irqrestore(&queue->response_lock, flags);

	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}


static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}

static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags)
{
	RING_IDX i = queue->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&queue->rx, i);
	resp->offset = offset;
	resp->flags = flags;
	resp->id = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	queue->rx.rsp_prod_pvt = ++i;

	return resp;
}

void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}

int xenvif_map_frontend_rings(struct xenvif_queue *queue,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(queue);
	return err;
}

static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}
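
/* A queue is considered stalled when the frontend has left fewer than
 * XEN_NETBK_RX_SLOTS_MAX free Rx slots for longer than the configured
 * stall timeout; it becomes ready again once enough slots appear.
 */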
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled
		&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
		&& time_after(jiffies,
			      queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled
		&& prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
}

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return (!skb_queue_empty(&queue->rx_queue)
		&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
		|| (queue->vif->stall_timeout &&
		    (xenvif_rx_queue_stalled(queue)
		     || xenvif_rx_queue_ready(queue)))
		|| kthread_should_stop()
		|| queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}
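
/* Per-queue kthread that ships queued skbs to the frontend, manages
 * carrier state on stall/recovery and drops packets that have sat on
 * the internal queue for too long.
 */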
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out frontend sends malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains. These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		xenvif_rx_queue_maybe_wake(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}

int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs, by default */
	xenvif_max_queues = num_online_cpus();

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		pr_warn("Init of debugfs returned %ld!\n",
			PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
		debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");