/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
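/*
 * Illustrative sketch (not authoritative; see vring_init()/vring_size() in
 * include/uapi/linux/virtio_ring.h for the real definitions): the single
 * allocation behind vq->vring is laid out roughly as
 *
 *	struct vring_desc desc[num];		// descriptor table
 *	__virtio16 avail_flags, avail_idx;
 *	__virtio16 avail_ring[num];		// driver -> device
 *	__virtio16 used_event;			// if VIRTIO_RING_F_EVENT_IDX
 *	// ... padding up to 'align' ...
 *	__virtio16 used_flags, used_idx;
 *	struct vring_used_elem used_ring[num];	// device -> driver
 *	__virtio16 avail_event;			// if VIRTIO_RING_F_EVENT_IDX
 *
 * so the total size is approximately
 *
 *	ALIGN(num * sizeof(struct vring_desc) + (3 + num) * sizeof(__virtio16), align)
 *	+ 3 * sizeof(__virtio16) + num * sizeof(struct vring_used_elem)
 *
 * which is what vring_size() computes for vring_create_virtqueue() below.
 */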
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	else
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
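/*
 * Illustrative driver-side usage (a minimal sketch, not taken from any
 * in-tree driver; "my_req", "hdr", "status" and "my_lock" are hypothetical):
 *
 *	struct scatterlist hdr_sg, status_sg, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr_sg, &my_req->hdr, sizeof(my_req->hdr));
 *	sg_init_one(&status_sg, &my_req->status, sizeof(my_req->status));
 *	sgs[0] = &hdr_sg;		// device-readable
 *	sgs[1] = &status_sg;		// device-writable
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, my_req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 * On -ENOSPC the ring is full and the request should be retried once
 * buffers have been reclaimed with virtqueue_get_buf().
 */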
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
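/*
 * Illustrative receive-side usage (a minimal sketch, assuming a driver that
 * keeps refilling its rx queue; "buf" and "BUF_SIZE" are hypothetical):
 *
 *	struct scatterlist sg;
 *	void *buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *
 *	sg_init_one(&sg, buf, BUF_SIZE);
 *	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL))
 *		kfree(buf);		// ring full, try again later
 *	else
 *		virtqueue_kick(vq);
 *
 * The same "buf" pointer is handed back by virtqueue_get_buf() once the
 * device has written into it.
 */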
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
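/*
 * Illustrative split-kick usage (a minimal sketch, assuming the driver's
 * own "my_lock" protects the virtqueue): the serialized half runs under
 * the lock, while the expensive notification (often an MMIO write or
 * hypercall) runs outside it.
 *
 *	bool kick;
 *
 *	spin_lock(&my_lock);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock(&my_lock);
 *
 *	if (kick)
 *		virtqueue_notify(vq);
 */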
static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
		       void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
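/*
 * Illustrative completion-handling usage (a minimal sketch; "complete_req"
 * is a hypothetical driver callback):
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_req(token, len);	// len = bytes the device wrote
 *
 * This is typically run from the virtqueue callback (see vring_interrupt()
 * below), under the same lock that serializes virtqueue_add_*().
 */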
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}

}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
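/*
 * Illustrative interrupt-mitigation loop (a minimal sketch of the pattern
 * many virtio drivers use in their virtqueue callback; "process" is a
 * hypothetical helper):
 *
 *	virtqueue_disable_cb(vq);
 *	do {
 *		unsigned int len;
 *		void *token;
 *
 *		while ((token = virtqueue_get_buf(vq, &len)))
 *			process(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 *
 * virtqueue_enable_cb() returning false means more buffers arrived while
 * callbacks were being re-enabled, so the loop runs again rather than
 * risking a missed interrupt.
 */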
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i, NULL);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
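/*
 * Illustrative shutdown usage (a minimal sketch; assumes the queue has been
 * stopped and each token was a kmalloc()ed buffer):
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 *
 * Drivers do this before deleting the virtqueue so that buffers still
 * sitting in the ring are not leaked.
 */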
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
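/*
 * Illustrative transport-side wiring (a minimal sketch, loosely modelled on
 * a PCI-style transport with a per-virtqueue interrupt vector; "irq" and
 * "name" are hypothetical):
 *
 *	err = request_irq(irq, vring_interrupt, 0, name, vq);
 *
 * The void *_vq cookie handed back to vring_interrupt() is the
 * struct virtqueue itself, which is why it can be passed directly as the
 * request_irq() dev_id argument.
 */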
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
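/*
 * Illustrative transport usage (a minimal sketch; "my_notify" and
 * "my_callback" are hypothetical transport/driver functions, and the queue
 * size and alignment are only examples):
 *
 *	static bool my_notify(struct virtqueue *vq)
 *	{
 *		// e.g. write vq->index to a doorbell register
 *		return true;
 *	}
 *
 *	vq = vring_create_virtqueue(0, 256, PAGE_SIZE, vdev,
 *				    true,	// weak_barriers
 *				    true,	// may_reduce_num
 *				    false,	// context
 *				    my_notify, my_callback, "requests");
 *	if (!vq)
 *		return -ENOMEM;
 *
 * The ring memory is owned by the virtqueue (we_own_ring) and is released
 * again by vring_del_virtqueue().
 */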
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{

	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");