/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		  \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}
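/*
 * vbuffers are carved from the "virtio-gpu-vbufs" slab cache with the
 * command and (small) response buffers stored inline, directly behind
 * the struct virtio_gpu_vbuffer header:
 *
 *   [ struct virtio_gpu_vbuffer | cmd, <= 96 bytes | resp, <= 24 bytes ]
 *
 * Responses larger than MAX_INLINE_RESP_SIZE must be allocated by the
 * caller and passed in via resp_buf.
 */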
static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}
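/*
 * Work handler for the control virtqueue, scheduled from the
 * virtio_gpu_ctrl_ack() interrupt callback.  Drains all completed
 * buffers from the ring, logs error responses, advances the fence id
 * to the highest value seen, runs per-buffer response callbacks, and
 * finally releases the vbuffers.
 */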
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}
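/*
 * vmalloc'ed memory is virtually contiguous but may be physically
 * scattered, so it cannot be described by a single sg entry; build a
 * scatterlist with one entry per page instead.
 */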
/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				      struct virtio_gpu_vbuffer *vbuf,
				      struct virtio_gpu_fence *fence,
				      int elemcnt,
				      struct scatterlist **sgs,
				      int outcnt,
				      int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
}

static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
				  incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}
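/*
 * Commands are only queued, not kicked, by virtio_gpu_queue_ctrl_sgs();
 * pending_commands tracks whether anything has been queued that the
 * host has not been told about yet.  virtio_gpu_notify() flushes the
 * whole batch with a single kick instead of notifying the host once
 * per command.
 */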
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
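/*
 * VIRTIO_GPU_CMD_SET_SCANOUT ties a resource to a display output;
 * per the virtio-gpu spec, a resource_id of 0 disables the scanout.
 */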
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
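/*
 * Response callbacks.  These run from the dequeue work handler once
 * the host has completed a command that returns more than a plain
 * OK_NODATA answer; the response data is found in vbuf->resp_buf.
 */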
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}
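/*
 * Fills a previously registered cap cache entry with the capset data
 * returned by the host.  Consumers wait on resp_wq and then check
 * is_valid; the smp_wmb() below is expected to pair with a read
 * barrier on that side so the copied data is visible before is_valid
 * is observed as set.
 */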
static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
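/*
 * Look up or create a cap cache entry for (capset id, version).  The
 * list is searched under display_info_lock so two tasks racing to
 * fetch the same capset end up sharing one entry; only the task that
 * inserted the entry actually sends VIRTIO_GPU_CMD_GET_CAPSET.
 */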
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
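/*
 * Attach/detach a resource to/from a 3d rendering context.  The object
 * array keeps a reference on the GEM object until the command has been
 * processed by the host (it is released from the dequeue work handler
 * via virtio_gpu_array_put_free_delayed()).
 */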
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
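/*
 * Mirror of virtio_gpu_cmd_transfer_to_host_3d() for the opposite
 * direction: the host copies from its resource into the guest-visible
 * backing pages.
 */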
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_mem_entry *ents,
			     unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
	return 0;
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}