/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	struct {
		/* Actual memory layout for this queue */
		struct vring vring;

		/* Last written value to avail->flags */
		u16 avail_flags_shadow;

		/* Last written value to avail->idx in guest byte order */
		u16 avail_idx_shadow;
	} split;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have
	 * multiple buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);

		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
				vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	else
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}

static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						       vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				      vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}


/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	return virtqueue_add_split(_vq, sgs, total_sg,
				   out_sgs, in_sgs, data, ctx, gfp);
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_num: the number of scatterlists readable by other side
 * @in_num: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
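/*
 * Usage sketch (illustrative only, not used by this file): a driver that
 * sends a device-readable request followed by a device-writable response
 * buffer might queue it roughly like this, where req, resp and token are
 * driver-specific:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	if (virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC) == 0)
 *		virtqueue_kick(vq);
 */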
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
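/*
 * Usage sketch (illustrative only): a receive path typically keeps the
 * queue topped up with device-writable buffers until it runs out of free
 * descriptors; alloc_rx_buf(), free_rx_buf() and RX_BUF_SIZE are
 * driver-specific:
 *
 *	struct scatterlist sg;
 *
 *	while (vq->num_free) {
 *		buf = alloc_rx_buf();
 *		if (!buf)
 *			break;
 *		sg_init_one(&sg, buf, RX_BUF_SIZE);
 *		if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC) < 0) {
 *			free_rx_buf(buf);
 *			break;
 *		}
 *	}
 *	virtqueue_kick(vq);
 */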
/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	return virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	return virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
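/*
 * Usage sketch (illustrative only): completions are typically drained
 * from the virtqueue callback by pulling tokens until the queue is
 * empty; complete_request() is driver-specific:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */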
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
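/*
 * Usage sketch (illustrative only): the usual race-free pattern is to
 * disable callbacks while processing and to re-check for work after
 * re-enabling them, either with virtqueue_enable_cb() as below or with
 * the virtqueue_enable_cb_prepare()/virtqueue_poll() pair when the
 * check and the wakeup happen in different contexts;
 * process_completions() is driver-specific:
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		process_completions(vq);
 *	} while (!virtqueue_enable_cb(vq));
 */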
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	return virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num - 1; i++)
		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->split.vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
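/*
 * Usage sketch (illustrative only): a transport typically creates its
 * queues with vring_create_virtqueue() and pairs that with
 * vring_del_virtqueue() on removal; my_notify, my_callback and MY_ALIGN
 * are transport-specific:
 *
 *	vq = vring_create_virtqueue(index, num, MY_ALIGN, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "requestq");
 *	if (!vq)
 *		return -ENOMEM;
 *	...
 *	vring_del_virtqueue(vq);
 */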
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");