/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}
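/*
 * virtqueue_add() iterates the caller's buffers through one of the two
 * helpers above: sg_next_chained() for properly terminated scatterlist
 * chains (used by virtqueue_add_sgs(), which ignores the count argument)
 * and sg_next_arr() for plain arrays with an explicit element count (used
 * by virtqueue_add_outbuf() and virtqueue_add_inbuf()).
 */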
/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	/* kmemleak gives a false positive, as it's hidden by virt_to_phys */
	kmemleak_ignore(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
				> 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
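	/* An indirect table costs an extra allocation but consumes only a
	 * single descriptor in the ring itself, however many entries it
	 * describes. */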
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in,
					  out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
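	/* The totals let virtqueue_add() pick between the direct and
	 * indirect descriptor paths before touching the ring. */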
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlists (need not be terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlists (need not be terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
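/*
 * A sketch of the split-kick pattern described above, assuming the driver
 * serializes its virtqueue operations with a hypothetical spinlock vq_lock
 * (the lock, err, kick and token names are illustrative, not part of this
 * file):
 *
 *	spin_lock_irqsave(&vq_lock, flags);
 *	err = virtqueue_add_outbuf(vq, sg, num, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&vq_lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 *
 * The potentially slow notify (often an exit to the host) thus happens
 * without the lock held.  virtqueue_kick() below is simply the two halves
 * combined.
 */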
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the other side wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
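/*
 * A sketch of how virtqueue_enable_cb_prepare() and virtqueue_poll() combine
 * in a poll loop that only re-arms callbacks once the ring is drained
 * (process_buf() and the surrounding locking are illustrative, not part of
 * this file):
 *
 *	virtqueue_disable_cb(vq);
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		process_buf(buf, len);
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (unlikely(virtqueue_poll(vq, opaque)))
 *		virtqueue_disable_cb(vq);
 *
 * If virtqueue_poll() reports new used buffers, callbacks are disabled again
 * and the loop repeats instead of sleeping.
 */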
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
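	/* A power-of-2 size lets ring indices be reduced with a cheap mask,
	 * e.g. the "& (vq->vring.num - 1)" in virtqueue_add() and
	 * virtqueue_get_buf(), instead of a division. */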
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

MODULE_LICENSE("GPL");