/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <virglrenderer.h>
#include "virgl.h"

void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);

    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    free(cursor);
}

static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);

    virgl_renderer_resource_unref(unref.resource_id);
}

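/*
 * Capability-set (capset) handling: the guest first asks how many capsets
 * the renderer exposes and their maximum version/size, then fetches the
 * capset contents.  Only the VIRGL and VIRGL2 sets are handled here.
 */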
/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif

static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    /* Zero the response so no uninitialized stack bytes reach the guest. */
    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    resp = g_malloc0(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zd/%d)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

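/*
 * Transfer commands: copy data between the guest backing store (attached
 * via RESOURCE_ATTACH_BACKING below) and the host-side virgl resource, in
 * either direction.
 */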
static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                       res_iovs, att_rb.nr_entries);
}

static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    g_free(res_iovs);
}

static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            g_critical("%s: illegal resource specified %d\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        assert(fd >= 0);
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x = ss.r.x,
            .payload.dmabuf_scanout.y = ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };
        vg_send_msg(g, &msg, fd);
        close(fd);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}

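/*
 * A flush tells the frontend that a scanned-out resource changed: send a
 * DMABUF_UPDATE for every scanout bound to the resource and wait for the
 * frontend to acknowledge it.
 */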
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
        vg_wait_ok(g);
    }
}

static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRId64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

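/*
 * Fence callback registered with virglrenderer (see virgl_cbs below):
 * retire every queued command whose fence_id is not greater than the
 * signalled value.
 */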
static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        free(cmd);
        g->inflight--;
    }
}

#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif

static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd  = virgl_get_drm_fd,
    .version     = 2,
#else
    .version = 1,
#endif
    .write_fence = virgl_write_fence,
};

static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}

bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        g->renderer_source =
            vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}