/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	struct {
		/* Actual memory layout for this queue */
		struct vring vring;

		/* Last written value to avail->flags */
		u16 avail_flags_shadow;

		/* Last written value to avail->idx in guest byte order */
		u16 avail_idx_shadow;

		/* Per-descriptor state. */
		struct vring_desc_state_split *desc_state;
	} split;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time. On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge. Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess. For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);

		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
					vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}
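
/*
 * Worked example (editorial illustration, not part of the original file):
 * suppose a ring of 8 descriptors with free_head == 0, and a driver adds a
 * buffer made of two device-readable sgs and one device-writable sg without
 * indirect descriptors.  virtqueue_add_split() then leaves the ring as:
 *
 *	desc[0] = { addr0, len0, NEXT,  next = 1 }
 *	desc[1] = { addr1, len1, NEXT,  next = 2 }
 *	desc[2] = { addr2, len2, WRITE, next = 3 }	(chain ends here)
 *	avail->ring[avail_idx_shadow & 7] = 0;		(the head)
 *	avail->idx = ++avail_idx_shadow;		(after virtio_wmb)
 *
 * free_head becomes 3, num_free drops by 3, and desc_state[0].data holds the
 * caller's token so virtqueue_get_buf() can return it once the device places
 * head 0 in the used ring.
 */
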
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
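
/*
 * Editorial note with a numeric example (not from the original source): with
 * VIRTIO_RING_F_EVENT_IDX, the device publishes the avail index at which it
 * wants to be notified, and vring_need_event(event_idx, new, old) is true
 * exactly when that index falls in the window of entries just exposed, i.e.
 * when old <= event_idx < new in 16-bit modular arithmetic.  For instance,
 * if the shadow avail index moved from old = 5 to new = 8:
 *
 *	event_idx = 6  ->  kick (the device asked for a notification at 6)
 *	event_idx = 4  ->  no kick (a previous batch already crossed 4)
 *	event_idx = 8  ->  no kick (the device does not want one yet)
 */
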
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						       vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}


/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	return virtqueue_add_split(_vq, sgs, total_sg,
				   out_sgs, in_sgs, data, ctx, gfp);
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
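
/*
 * Illustrative usage sketch (editorial addition, not from the original file;
 * the driver-side names my_vq, req, req->hdr and req->status are
 * hypothetical):
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));		  // device-readable
 *	sg_init_one(&status, &req->status, sizeof(req->status)); // device-writable
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(my_vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(my_vq);
 *
 * Readable scatterlists must come first in the sgs array, followed by the
 * writable ones, matching the out_sgs/in_sgs counts passed here.
 */
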
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	return virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written. This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	return virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
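
/*
 * Illustrative pattern (editorial addition, not from the original file): a
 * driver's completion path typically drains the queue with callbacks
 * disabled and uses the prepare/poll pair to close the race against a
 * buffer that is used just as interrupts are re-enabled.  The names my_vq,
 * handle_completion, token and len below are hypothetical:
 *
 *	unsigned opaque;
 *	unsigned int len;
 *	void *token;
 *
 *	do {
 *		virtqueue_disable_cb(my_vq);
 *		while ((token = virtqueue_get_buf(my_vq, &len)) != NULL)
 *			handle_completion(token, len);
 *		opaque = virtqueue_enable_cb_prepare(my_vq);
 *	} while (virtqueue_poll(my_vq, opaque));
 *
 * virtqueue_enable_cb() below is the simpler combination of the last two
 * calls.
 */
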
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	return virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback? Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	vq->split.desc_state = kmalloc_array(vring.num,
			sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!vq->split.desc_state) {
		kfree(vq);
		return NULL;
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->split.desc_state, 0, vring.num *
			sizeof(struct vring_desc_state_split));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address. The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine. Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
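
/*
 * Illustrative sketch (editorial addition, not from the original file): a
 * virtio transport typically creates a queue this way and then programs the
 * ring addresses into the device.  The alignment, notify callback and
 * variable names below are hypothetical:
 *
 *	vq = vring_create_virtqueue(index, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, ctx, my_notify, callback,
 *				    "requestq");
 *	if (!vq)
 *		return -ENOMEM;
 *	// Tell the device where the ring lives (transport specific):
 *	desc_addr  = virtqueue_get_desc_addr(vq);
 *	avail_addr = virtqueue_get_avail_addr(vq);
 *	used_addr  = virtqueue_get_used_addr(vq);
 *
 * The *_get_*_addr() helpers below are only valid for queues whose ring was
 * allocated here (we_own_ring), not for rings wrapped by vring_new_virtqueue().
 */
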
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->split.vring.desc, vq->queue_dma_addr);
		kfree(vq->split.desc_state);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring. This is mainly used for boasting to
 * userspace. Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover. You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");