/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots a skb can have. If a guest sends a
 * skb which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128

/* This is the maximum number of flows in the hash cache. */
#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");

/* This module parameter tells whether data for xen-netfront has to be
 * placed at the XDP_PACKET_HEADROOM offset needed for XDP processing.
 */
bool provides_xdp_headroom = true;
module_param(provides_xdp_headroom, bool, 0644);

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 st);
static void push_tx_responses(struct xenvif_queue *queue);

static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

static inline int tx_work_todo(struct xenvif_queue *queue);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);
	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)skb_frag_off(frag);
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	skb_frag_off_set(frag, pending_idx);
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
	else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
				     &queue->eoi_pending) &
		 (NETBK_TX_EOI | NETBK_COMMON_EOI))
		xen_irq_lateeoi(queue->tx_irq, 0);
}

static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = max(131072UL, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero.
	 */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	queue->remaining_credit = min(max_credit, max_burst);
	queue->rate_limited = false;
}

void xenvif_tx_credit_callback(struct timer_list *t)
{
	struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);

	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}

static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp,
			  unsigned int extra_count, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;
	unsigned long flags;

	do {
		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
		push_tx_responses(queue);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (cons == end)
			break;
		RING_COPY_REQUEST(&queue->tx, cons++, txp);
		extra_count = 0; /* only the first frag can have extras */
	} while (1);
	queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->num_queues)
		xenvif_kick_thread(&vif->queues[0]);
}

static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 unsigned int extra_count,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * is considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* The Xen network protocol had an implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but fewer than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, extra_count, cons + slots);
		return drop_err;
	}

	return slots;
}


struct xenvif_tx_cb {
	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
	u8 copy_count;
	u32 split_mask;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)

static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   unsigned int extra_count,
					   struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
	queue->pending_tx_info[pending_idx].extra_count = extra_count;
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);

	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}

static void xenvif_get_requests(struct xenvif_queue *queue,
				struct sk_buff *skb,
				struct xen_netif_tx_request *first,
				struct xen_netif_tx_request *txfrags,
				unsigned *copy_ops,
				unsigned *map_ops,
				unsigned int frag_overflow,
				struct sk_buff *nskb,
				unsigned int extra_count,
				unsigned int data_len)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx;
	pending_ring_idx_t index;
	unsigned int nr_slots;
	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
	struct xen_netif_tx_request *txp = first;

	nr_slots = shinfo->nr_frags + frag_overflow + 1;

	copy_count(skb) = 0;
	XENVIF_TX_CB(skb)->split_mask = 0;

	/* Create copy ops for exactly data_len bytes into the skb head. */
	__skb_put(skb, data_len);
	while (data_len > 0) {
		int amount = data_len > txp->size ?
			txp->size : data_len;
		bool split = false;

		cop->source.u.ref = txp->gref;
		cop->source.domid = queue->vif->domid;
		cop->source.offset = txp->offset;

		cop->dest.domid = DOMID_SELF;
		cop->dest.offset = (offset_in_page(skb->data +
						   skb_headlen(skb) -
						   data_len)) & ~XEN_PAGE_MASK;
		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
					       - data_len);

		/* Don't cross local page boundary! */
		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
			amount = XEN_PAGE_SIZE - cop->dest.offset;
			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
			split = true;
		}

		cop->len = amount;
		cop->flags = GNTCOPY_source_gref;

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];
		callback_param(queue, pending_idx).ctx = NULL;
		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
		if (!split)
			copy_count(skb)++;

		cop++;
		data_len -= amount;

		if (amount == txp->size) {
			/* The copy op covered the full tx_request */

			memcpy(&queue->pending_tx_info[pending_idx].req,
			       txp, sizeof(*txp));
			queue->pending_tx_info[pending_idx].extra_count =
				(txp == first) ? extra_count : 0;

			if (txp == first)
				txp = txfrags;
			else
				txp++;
			queue->pending_cons++;
			nr_slots--;
		} else {
			/* The copy op partially covered the tx_request.
			 * The remainder will be mapped or copied in the next
			 * iteration.
			 */
			txp->offset += amount;
			txp->size -= amount;
		}
	}

	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
	     nr_slots--) {
		if (unlikely(!txp->size)) {
			unsigned long flags;

			spin_lock_irqsave(&queue->response_lock, flags);
			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
			push_tx_responses(queue);
			spin_unlock_irqrestore(&queue->response_lock, flags);
			++txp;
			continue;
		}

		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp,
					txp == first ? extra_count : 0, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
		++shinfo->nr_frags;
		++gop;

		if (txp == first)
			txp = txfrags;
		else
			txp++;
	}

	if (nr_slots > 0) {

		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
			if (unlikely(!txp->size)) {
				unsigned long flags;

				spin_lock_irqsave(&queue->response_lock, flags);
				make_tx_response(queue, txp, 0,
						 XEN_NETIF_RSP_OKAY);
				push_tx_responses(queue);
				spin_unlock_irqrestore(&queue->response_lock,
						       flags);
				continue;
			}

			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
						gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
			++shinfo->nr_frags;
			++gop;
		}

		if (shinfo->nr_frags) {
			skb_shinfo(skb)->frag_list = nskb;
			nskb = NULL;
		}
	}

	if (nskb) {
		/* A frag_list skb was allocated but it is no longer needed
		 * because enough slots were converted to copy ops above or some
		 * were empty.
		 */
		kfree_skb(nskb);
	}

	(*copy_ops) = cop - queue->tx_copy_ops;
	(*map_ops) = gop - queue->tx_map_ops;
}

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}

static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) ==
				    copy_pending_idx(skb, copy_count(skb) - 1);
	int i, err = 0;

	for (i = 0; i < copy_count(skb); i++) {
		int newerr;

		/* Check status of header. */
		pending_idx = copy_pending_idx(skb, i);

		newerr = (*gopp_copy)->status;

		/* Split copies need to be handled together. */
		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
			(*gopp_copy)++;
			if (!newerr)
				newerr = (*gopp_copy)->status;
		}
		if (likely(!newerr)) {
			/* The first frag might still have this slot mapped */
			if (i < copy_count(skb) - 1 || !sharedslot)
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
		} else {
			err = newerr;
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
					   (*gopp_copy)->status,
					   pending_idx,
					   (*gopp_copy)->source.u.ref);
			/* The first frag might still have this slot mapped */
			if (i < copy_count(skb) - 1 || !sharedslot)
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_ERROR);
		}
		(*gopp_copy)++;
	}

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment.
			 */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && !first_shinfo && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);

		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = shinfo;
		shinfo = skb_shinfo(shinfo->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}

static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous */
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);

		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}

static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     unsigned int *extra_count,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		RING_COPY_REQUEST(&queue->tx, cons, &extra);

		queue->tx.req_cons = ++cons;
		(*extra_count)++;

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}

static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		queue->stats.rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = queue->credit_window_start +
		msecs_to_jiffies(queue->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&queue->credit_timeout)) {
		queue->rate_limited = true;
		return true;
	}

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		queue->credit_window_start = now;
		tx_add_credit(queue);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > queue->remaining_credit) {
		mod_timer(&queue->credit_timeout,
			  next_credit);
		queue->credit_window_start = next_credit;
		queue->rate_limited = true;

		return true;
	}

	return false;
}

/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Too many multicast addresses\n");
		return -ENOSPC;
	}

	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
	if (!mcast)
		return -ENOMEM;

	ether_addr_copy(mcast->addr, addr);
	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
	vif->fe_mcast_count++;

	return 0;
}

static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			--vif->fe_mcast_count;
			list_del_rcu(&mcast->entry);
			kfree_rcu(mcast, rcu);
			break;
		}
	}
}

bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	rcu_read_lock();
	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
	/* No need for locking or RCU here. NAPI poll and TX queue
	 * are stopped.
	 */
	while (!list_empty(&vif->fe_mcast_addr)) {
		struct xenvif_mcast_addr *mcast;

		mcast = list_first_entry(&vif->fe_mcast_addr,
					 struct xenvif_mcast_addr,
					 entry);
		--vif->fe_mcast_count;
		list_del(&mcast->entry);
		kfree(mcast);
	}
}

static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 int budget,
				 unsigned *copy_ops,
				 unsigned *map_ops)
{
	struct sk_buff *skb, *nskb;
	int ret;
	unsigned int frag_overflow;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		unsigned int extra_count;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		RING_COPY_REQUEST(&queue->tx, idx, &txreq);

		/* Credit-based scheduling.
		 */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		extra_count = 0;
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       &extra_count,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 (ret == 0) ?
					 XEN_NETIF_RSP_OKAY :
					 XEN_NETIF_RSP_ERROR);
			push_tx_responses(queue);
			continue;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 XEN_NETIF_RSP_OKAY);
			push_tx_responses(queue);
			continue;
		}

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		ret = xenvif_count_requests(queue, &txreq, extra_count,
					    txfrags, work_to_do);

		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
				   txreq.offset, txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
			data_len = txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		skb_shinfo(skb)->nr_frags = ret;
		/* At this point shinfo->nr_frags is in fact the number of
		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
		 */
		frag_overflow = 0;
		nskb = NULL;
		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
			nskb = xenvif_alloc_skb(0);
			if (unlikely(nskb == NULL)) {
				skb_shinfo(skb)->nr_frags = 0;
				kfree_skb(skb);
				xenvif_tx_err(queue, &txreq, extra_count, idx);
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Can't allocate the frag_list skb.\n");
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal.
				 */
				skb_shinfo(skb)->nr_frags = 0;
				kfree_skb(skb);
				kfree_skb(nskb);
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
			struct xen_netif_extra_info *extra;
			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

			switch (extra->u.hash.type) {
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
				type = PKT_HASH_TYPE_L3;
				break;

			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
				type = PKT_HASH_TYPE_L4;
				break;

			default:
				break;
			}

			if (type != PKT_HASH_TYPE_NONE)
				skb_set_hash(skb,
					     *(u32 *)extra->u.hash.value,
					     type);
		}

		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
				    map_ops, frag_overflow, nskb, extra_count,
				    data_len);

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;
	}

	return;
}

/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);
	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;
			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(skb_frag_page(&frags[j]));
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		skb_frag_fill_page_desc(&frags[i], page, 0, len);
	}

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(NULL, uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}

static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;

		pending_idx = copy_pending_idx(skb, 0);
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code.
		 */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
			xenvif_skb_zerocopy_prepare(queue, nskb);
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
			/* Copied all the bits from the frag list -- free it. */
			skb_frag_list_init(skb);
			kfree_skb(nskb);
		}

		skb->dev = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss, hdrlen;

			/* GSO implies having the L4 header. */
			WARN_ON_ONCE(!skb_transport_header_was_set(skb));
			if (unlikely(!skb_transport_header_was_set(skb))) {
				kfree_skb(skb);
				continue;
			}

			mss = skb_shinfo(skb)->gso_size;
			hdrlen = skb_tcp_all_headers(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}

void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
			      bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}

static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}


/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops = 0, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		if (ret) {
			unsigned int i;

			netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
				   nr_mops, ret);
			for (i = 0; i < nr_mops; ++i)
				WARN_ON_ONCE(queue->tx_map_ops[i].status ==
					     GNTST_okay);
		}
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req,
			 pending_tx_info->extra_count, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}


static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id = txp->id;
	resp->status = st;

	while (extra_count-- != 0)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}

static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}

static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}

int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	rsp_prod = READ_ONCE(txs->rsp_prod);
	req_prod = READ_ONCE(txs->req_prod);

	BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
		goto err;

	err =
	    xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				   &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	rsp_prod = READ_ONCE(rxs->rsp_prod);
	req_prod = READ_ONCE(rxs->req_prod);

	BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
		goto err;

	return 0;

err:
	xenvif_unmap_frontend_data_rings(queue);
	return err;
}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}

int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}

static void make_ctrl_response(struct xenvif *vif,
			       const struct xen_netif_ctrl_request *req,
			       u32 status, u32 data)
{
	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
	struct xen_netif_ctrl_response rsp = {
		.id = req->id,
		.type = req->type,
		.status = status,
		.data = data,
	};

	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
	vif->ctrl.rsp_prod_pvt = ++idx;
}

static void push_ctrl_response(struct xenvif *vif)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
	if (notify)
		notify_remote_via_irq(vif->ctrl_irq);
}

static void process_ctrl_request(struct xenvif *vif,
				 const struct xen_netif_ctrl_request *req)
{
	u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
	u32 data = 0;

	switch (req->type) {
	case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
		status = xenvif_set_hash_alg(vif, req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
		status = xenvif_get_hash_flags(vif, &data);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
		status = xenvif_set_hash_flags(vif, req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
		status = xenvif_set_hash_key(vif, req->data[0],
					     req->data[1]);
		break;

	case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
		status = XEN_NETIF_CTRL_STATUS_SUCCESS;
		data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
		status = xenvif_set_hash_mapping_size(vif,
						      req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
		status = xenvif_set_hash_mapping(vif, req->data[0],
						 req->data[1],
						 req->data[2]);
		break;

	default:
		break;
	}

	make_ctrl_response(vif, req, status, data);
	push_ctrl_response(vif);
}

static void xenvif_ctrl_action(struct xenvif *vif)
{
	for (;;) {
		RING_IDX req_prod, req_cons;

		req_prod = vif->ctrl.sring->req_prod;
		req_cons = vif->ctrl.req_cons;

		/* Make sure we can see requests before we process them.
		 */
		rmb();

		if (req_cons == req_prod)
			break;

		while (req_cons != req_prod) {
			struct xen_netif_ctrl_request req;

			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
			req_cons++;

			process_ctrl_request(vif, &req);
		}

		vif->ctrl.req_cons = req_cons;
		vif->ctrl.sring->req_event = req_cons + 1;
	}
}

static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
		return true;

	return false;
}

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
	struct xenvif *vif = data;
	unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;

	while (xenvif_ctrl_work_todo(vif)) {
		xenvif_ctrl_action(vif);
		eoi_flag = 0;
	}

	xen_irq_lateeoi(irq, eoi_flag);

	return IRQ_HANDLED;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs, but at most 8, if the user
	 * has not specified a value.
	 */
	if (xenvif_max_queues == 0)
		xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
					  num_online_cpus());

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");