/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE \
                               + MAX_INLINE_RESP_SIZE)

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

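/* Cursor commands expect no response from the host, so resp_size is zero here. */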
static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                        if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
                                struct virtio_gpu_ctrl_hdr *cmd;
                                cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
                                DRM_ERROR("response 0x%x (command 0x%x)\n",
                                          le32_to_cpu(resp->type),
                                          le32_to_cpu(cmd->type));
                        } else
                                DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);

                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
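                /*
                 * Drain completed cursor buffers with callbacks disabled;
                 * loop again if more complete while callbacks are re-enabled.
                 */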
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
        __releases(&vgdev->ctrlq.qlock)
        __acquires(&vgdev->ctrlq.qlock)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
        int ret;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vbuf->data_size) {
                sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                sgs[outcnt + incnt] = &vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        if (!ret)
                ret = vq->num_free;
        return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        int rc;

        spin_lock(&vgdev->ctrlq.qlock);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
                                               struct virtio_gpu_fence *fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue.  If not,
         * wait here until we have.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_locked might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
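         *
         * A control buffer uses at most three descriptors: the command,
         * an optional data buffer and an optional response buffer, hence
         * the check for three free entries below.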
         */
        if (vq->num_free < 3) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (!ret)
                ret = vq->num_free;
        return ret;
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(width);
        cmd_p->height = cpu_to_le32(height);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                                  uint32_t resource_id,
                                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_object *bo,
                                        uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       bo->pages->sgl, bo->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

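        /*
         * If the probe helper saw no connector change it will not have sent
         * a hotplug event itself, so send one here so userspace still picks
         * up the new display info.
         */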
        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
                                 unsigned int block, size_t len)
{
        struct virtio_gpu_resp_edid *resp = data;
        size_t start = block * EDID_LENGTH;

        if (start + len > le32_to_cpu(resp->size))
                return -1;
        memcpy(buf, resp->edid + start, len);
        return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_cmd_get_edid *cmd =
                (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
        struct virtio_gpu_resp_edid *resp =
                (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
        uint32_t scanout = le32_to_cpu(cmd->scanout);
        struct virtio_gpu_output *output;
        struct edid *new_edid, *old_edid;

        if (scanout >= vgdev->num_scanouts)
                return;
        output = vgdev->outputs + scanout;

        new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);

        spin_lock(&vgdev->display_info_lock);
        old_edid = output->edid;
        output->edid = new_edid;
        drm_connector_update_edid_property(&output->conn, output->edid);
        spin_unlock(&vgdev->display_info_lock);

        kfree(old_edid);
        wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

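/* Query one capability set slot; the callback stores its id, max version and max size. */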
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *resp_buf;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_cmd_get_edid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;
        int scanout;

        if (WARN_ON(!vgdev->has_edid))
                return -EINVAL;

        for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
                resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
                                   GFP_KERNEL);
                if (!resp_buf)
                        return -ENOMEM;

                cmd_p = virtio_gpu_alloc_cmd_resp
                        (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
                         sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
                         resp_buf);
                cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
                cmd_p->scanout = cpu_to_le32(scanout);
                virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        }

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
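        /* nlen carries the debug name length; keep the local copy nul-terminated */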
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_resource_create_3d *rc_3d)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        *cmd_p = *rc_3d;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->hdr.flags = 0;

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_object *bo,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       bo->pages->sgl, bo->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

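/* Readback: ask the host to copy data from a 3D resource back into guest memory. */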
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             struct virtio_gpu_fence *fence)
{
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si, nents;

        if (!obj->created)
                return 0;

        if (!obj->pages) {
                int ret;

                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
        }

        if (use_dma_api) {
                obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
                                         obj->pages->sgl, obj->pages->nents,
                                         DMA_TO_DEVICE);
                nents = obj->mapped;
        } else {
                nents = obj->pages->nents;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, nents, si) {
                ents[si].addr = cpu_to_le64(use_dma_api
                                            ? sg_dma_address(sg)
                                            : sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents,
                                               fence);
        return 0;
}

void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj)
{
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api && obj->mapped) {
                struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
                /* detach backing and wait for the host to process it ... */
                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
                dma_fence_wait(&fence->f, true);
                dma_fence_put(&fence->f);

                /* ... then tear down iommu mappings */
                dma_unmap_sg(vgdev->vdev->dev.parent,
                             obj->pages->sgl, obj->mapped,
                             DMA_TO_DEVICE);
                obj->mapped = 0;
        } else {
                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
        }
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}