/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		  \
			       + MAX_INLINE_RESP_SIZE)

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

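/*
 * Cursor commands are fire-and-forget: the command is a fixed-size
 * struct virtio_gpu_update_cursor with no response requested from the
 * host (resp_size is 0) and no completion callback, so it fits entirely
 * in the vbuffer's inline storage.
 */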
static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
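		/* cursor buffers carry no response payload or callback,
		 * so reclaiming them is just freeing the vbuffer */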
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						   uint32_t resource_id,
						   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
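	/*
	 * Without the IOMMU-bypass quirk the backing pages go through the
	 * DMA API, so sync CPU writes to the device before the host reads
	 * the transferred region.
	 */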
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	drm_connector_update_edid_property(&output->conn, output->edid);
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *resp_buf;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_resource_create_3d *rc_3d)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	*cmd_p = *rc_3d;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->hdr.flags = 0;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents;

	if (!obj->created)
		return 0;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}