/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}
/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}
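/*
 * Illustration (not part of the original file): for a request with eight
 * scatterlist entries, going indirect consumes a single ring descriptor
 * instead of eight chained ones, at the cost of one allocation:
 *
 *	ring descriptor:  flags = VRING_DESC_F_INDIRECT
 *	                  addr  = virt_to_phys(desc)
 *	                  len   = 8 * sizeof(struct vring_desc)
 *	indirect table:   desc[0..6] chained via VRING_DESC_F_NEXT,
 *	                  desc[7] with the NEXT flag cleared.
 */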
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in,
					  out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
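/*
 * Usage sketch (illustrative; "req" and "vq" are hypothetical): a driver
 * queueing a request with one device-readable header and one
 * device-writable status byte might do:
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[] = { &hdr, &status };
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */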
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist array (need not be terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist array (need not be terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
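/*
 * Usage sketch (illustrative; the lock is hypothetical): the split
 * prepare/notify form lets a driver drop its lock before the
 * possibly-expensive exit to the host, since only the prepare half
 * must be serialized with other virtqueue operations:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */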
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
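/*
 * Usage sketch (illustrative; complete_request() is hypothetical): a
 * driver's completion path typically drains every used buffer in one pass:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */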
581 * 582 * This re-enables callbacks; it returns "false" if there are pending 583 * buffers in the queue, to detect a possible race between the driver 584 * checking for more work, and enabling callbacks. 585 * 586 * Caller must ensure we don't call this with other virtqueue 587 * operations at the same time (except where noted). 588 */ 589 bool virtqueue_enable_cb(struct virtqueue *_vq) 590 { 591 struct vring_virtqueue *vq = to_vvq(_vq); 592 593 START_USE(vq); 594 595 /* We optimistically turn back on interrupts, then check if there was 596 * more to do. */ 597 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to 598 * either clear the flags bit or point the event index at the next 599 * entry. Always do both to keep code simple. */ 600 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; 601 vring_used_event(&vq->vring) = vq->last_used_idx; 602 virtio_mb(vq->weak_barriers); 603 if (unlikely(more_used(vq))) { 604 END_USE(vq); 605 return false; 606 } 607 608 END_USE(vq); 609 return true; 610 } 611 EXPORT_SYMBOL_GPL(virtqueue_enable_cb); 612 613 /** 614 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. 615 * @vq: the struct virtqueue we're talking about. 616 * 617 * This re-enables callbacks but hints to the other side to delay 618 * interrupts until most of the available buffers have been processed; 619 * it returns "false" if there are many pending buffers in the queue, 620 * to detect a possible race between the driver checking for more work, 621 * and enabling callbacks. 622 * 623 * Caller must ensure we don't call this with other virtqueue 624 * operations at the same time (except where noted). 625 */ 626 bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) 627 { 628 struct vring_virtqueue *vq = to_vvq(_vq); 629 u16 bufs; 630 631 START_USE(vq); 632 633 /* We optimistically turn back on interrupts, then check if there was 634 * more to do. */ 635 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to 636 * either clear the flags bit or point the event index at the next 637 * entry. Always do both to keep code simple. */ 638 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; 639 /* TODO: tune this threshold */ 640 bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; 641 vring_used_event(&vq->vring) = vq->last_used_idx + bufs; 642 virtio_mb(vq->weak_barriers); 643 if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { 644 END_USE(vq); 645 return false; 646 } 647 648 END_USE(vq); 649 return true; 650 } 651 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); 652 653 /** 654 * virtqueue_detach_unused_buf - detach first unused buffer 655 * @vq: the struct virtqueue we're talking about. 656 * 657 * Returns NULL or the "data" token handed to virtqueue_add_*(). 658 * This is not valid on an active queue; it is useful only for device 659 * shutdown. 660 */ 661 void *virtqueue_detach_unused_buf(struct virtqueue *_vq) 662 { 663 struct vring_virtqueue *vq = to_vvq(_vq); 664 unsigned int i; 665 void *buf; 666 667 START_USE(vq); 668 669 for (i = 0; i < vq->vring.num; i++) { 670 if (!vq->data[i]) 671 continue; 672 /* detach_buf clears data, so grab it now. */ 673 buf = vq->data[i]; 674 detach_buf(vq, i); 675 vq->vring.avail->idx--; 676 END_USE(vq); 677 return buf; 678 } 679 /* That should have freed everything. 
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in the free list. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");