/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = vq->vring.desc[i].next;
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
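
/*
 * Example (an illustrative sketch, not taken from any particular
 * driver): a request with one device-readable header and one
 * device-writable status byte could be queued like this; "req",
 * "status" and "token" are hypothetical driver-side objects, and a
 * non-zero return (e.g. -ENOSPC when the ring is full) means the
 * buffer was not queued.
 *
 *	struct scatterlist hdr, stat, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&stat, status, sizeof(*status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &stat;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
 *	if (err)
 *		return err;
 *	virtqueue_kick(vq);
 */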

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
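
/*
 * Example (illustrative sketch): the split kick lets a driver drop its
 * own lock before notifying, since only the prepare step has to be
 * serialized against other virtqueue operations; "lock" is a
 * hypothetical per-virtqueue driver lock.
 *
 *	unsigned long flags;
 *	bool kick;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	... add buffers ...
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */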

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i, j;
	u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	/* Free the indirect table, if any, now that it's unmapped. */
	if (vq->desc_state[head].indir_desc) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(vq->desc_state[head].indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
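
/*
 * Example (illustrative sketch): a virtqueue callback typically drains
 * all used buffers in a loop; each returned pointer is the "data"
 * token that was handed to virtqueue_add_*().  "handle_used" is a
 * hypothetical driver function.
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_used(token, len);
 *
 * The driver must still serialize this against its own add/kick paths.
 */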

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
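
/*
 * Example (illustrative sketch): the usual race-free pattern is to
 * re-enable callbacks only once the queue looks empty, and to resume
 * processing if a buffer slipped in meanwhile; "handle_used" is a
 * hypothetical driver function.
 *
 *	for (;;) {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			handle_used(token, len);
 *		if (virtqueue_enable_cb(vq))
 *			break;
 *	}
 *
 * virtqueue_enable_cb_prepare() plus virtqueue_poll() split the same
 * check so that the poll can happen outside the serialized region.
 */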

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
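
/*
 * Example (illustrative sketch): during device teardown, after the
 * virtqueues have been stopped/reset, a driver can reclaim the tokens
 * it queued but never got back; "free_token" is a hypothetical driver
 * function.
 *
 *	void *token;
 *
 *	while ((token = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_token(token);
 */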

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
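
/*
 * Example (illustrative sketch of a transport's queue-setup path, not
 * a real transport): "my_notify" is a hypothetical function that would
 * poke the device's notification register, and the queue size 256 and
 * SMP_CACHE_BYTES alignment are arbitrary choices.
 *
 *	vq = vring_create_virtqueue(index, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, my_notify, callback, name);
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 *
 * The transport would then tell the device about the ring addresses
 * (virtqueue_get_desc_addr() and friends below) and later tear the
 * queue down with vring_del_virtqueue().
 */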

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");