/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/drm.h"
#include "qapi/error.h"
#include "qemu/sockets.h"

#include <pixman.h>
#include <glib-unix.h>

#include "vugpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "virgl.h"
#include "vugbm.h"

enum {
    VHOST_USER_GPU_MAX_QUEUES = 2,
};

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);
static void vg_cleanup_mapping(VuGpu *g,
                               struct virtio_gpu_simple_resource *res);

static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

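/*
 * Write a message to the vhost-user-gpu socket, optionally passing a file
 * descriptor (e.g. a dmabuf fd) as SCM_RIGHTS ancillary data.  Short or
 * failed writes are only warned about; the caller decides whether to close
 * the channel.
 */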
static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->state = VG_CMD_STATE_FINISHED;
}

void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}

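/*
 * VIRTIO_GPU_CMD_GET_DISPLAY_INFO and VIRTIO_GPU_CMD_GET_EDID are answered
 * by the frontend over the vhost-user-gpu socket.  The guest command stays
 * on the fence queue and control-queue processing is suspended (wait_in)
 * until the reply arrives; the callbacks below then complete the guest
 * response and resume vg_handle_ctrl().
 */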
static gboolean
get_display_info_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("disp info cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_DISPLAY_INFO,
                     sizeof(dpy_info), &dpy_info)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
        .size = 0,
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_display_info_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

static gboolean
get_edid_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_edid resp_edid;
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("get edid cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_EDID);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_EDID,
                     sizeof(resp_edid), &resp_edid)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &resp_edid.hdr, sizeof(resp_edid));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_edid(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_get_edid get_edid;

    VUGPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_EDID,
        .size = sizeof(VhostUserGpuEdidRequest),
        .payload.edid_req = {
            .scanout_id = get_edid.scanout,
        },
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_edid_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        vugbm_buffer_destroy(&res->buffer);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

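/*
 * Disable a scanout: drop the resource binding and, while the frontend
 * socket is still open, tell it to turn the display off.
 */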
static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    vg_cleanup_mapping(g, res);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    vg_resource_destroy(g, res);
}

int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_new0(struct iovec, ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

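/*
 * Attach guest memory to a resource.  The guest-physical entries are
 * translated into an iovec by vg_create_mapping_iov(), so later transfers
 * copy directly from guest memory into the pixman image.
 */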
static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

/* Currently this only frees the iov; it may do more work later. */
void vg_cleanup_mapping_iov(VuGpu *g,
                            struct iovec *iov, uint32_t count)
{
    g_free(iov);
}

static void
vg_cleanup_mapping(VuGpu *g,
                   struct virtio_gpu_simple_resource *res)
{
    vg_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    vg_cleanup_mapping(g, res);
}

static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       img_data + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

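/*
 * Bind a resource to a scanout.  If the backing vugbm buffer can be
 * exported as a dmabuf, the fd is passed to the frontend with
 * VHOST_USER_GPU_DMABUF_SCANOUT; otherwise a plain VHOST_USER_GPU_SCANOUT
 * is sent and pixel data gets copied on every flush.
 */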
static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

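/*
 * Flush a damaged rectangle to every scanout the resource is bound to.
 * The rectangle is clipped against each scanout: dmabuf scanouts only get
 * a VHOST_USER_GPU_DMABUF_UPDATE notification (and we wait for the ack),
 * while shared-memory scanouts receive the clipped pixels inline in a
 * VHOST_USER_GPU_UPDATE message.
 */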
static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                }
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            pixman_image_t *i =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, i,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(i);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

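/*
 * Dispatch one control-queue command.  Commands that completed
 * synchronously are answered here with either the recorded error or
 * VIRTIO_GPU_RESP_OK_NODATA; commands still waiting on the frontend are
 * left in VG_CMD_STATE_PENDING and answered from their callbacks.
 */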
static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        vg_get_edid(vg, cmd);
        break;
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (cmd->state == VG_CMD_STATE_NEW) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_in != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->state = VG_CMD_STATE_NEW;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (cmd->state != VG_CMD_STATE_FINISHED) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
            vg->inflight++;
        } else {
            free(cmd);
        }
    }
}

static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

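/*
 * Translate a cursor-queue command into the corresponding vhost-user-gpu
 * message: moves become CURSOR_POS (or CURSOR_POS_HIDE when resource_id is
 * 0), updates carry the 64x64 32-bit cursor image inline.
 */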
static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    switch (cursor->hdr.type) {
    case VIRTIO_GPU_CMD_MOVE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        g_debug("%s: move", G_STRFUNC);
        vg_send_msg(g, &msg, -1);
        break;
    }
    case VIRTIO_GPU_CMD_UPDATE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        g_debug("%s: update", G_STRFUNC);
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
        break;
    }
    default:
        g_debug("%s: unknown cmd %d", G_STRFUNC, cursor->hdr.type);
        break;
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

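/*
 * Negotiate vhost-user-gpu protocol features with the frontend: query its
 * features, mask them down to what this backend implements (currently only
 * EDID) and send the result back with SET_PROTOCOL_FEATURES.
 */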
static gboolean
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    const uint64_t protocol_edid = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID);
    VuGpu *g = user_data;
    uint64_t protocol_features;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    if (!vg_recv_msg(g, msg.request,
                     sizeof(protocol_features), &protocol_features)) {
        return G_SOURCE_CONTINUE;
    }

    protocol_features &= protocol_edid;

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = protocol_features,
    };
    vg_send_msg(g, &msg, -1);

    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    if (g->edid_inited && !(protocol_features & protocol_edid)) {
        g_printerr("EDID feature set by the frontend but it does not support "
                   "the EDID vhost-user-gpu protocol.\n");
        exit(EXIT_FAILURE);
    }

    return G_SOURCE_REMOVE;
}

static void
set_gpu_protocol_features(VuGpu *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    };

    vg_send_msg(g, &msg, -1);
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               protocol_features_cb, g);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }

    return 0;
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }
    features |= 1 << VIRTIO_GPU_F_EDID;

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->edid_inited = !!(features & (1 << VIRTIO_GPU_F_EDID));

    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    if (len > sizeof(struct virtio_gpu_config)) {
        return -1;
    }

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

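/*
 * libvhost-user-glib device callbacks.  vg_process_msg() intercepts
 * VHOST_USER_GPU_SET_SOCKET, which hands over the extra socket used for
 * display updates and triggers protocol feature negotiation.
 */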
static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};

int
main(int argc, char *argv[])
{
    GOptionContext *context;
    GError *error = NULL;
    GMainLoop *loop = NULL;
    int fd;
    VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };

    QTAILQ_INIT(&g.reslist);
    QTAILQ_INIT(&g.fenceq);

    context = g_option_context_new("QEMU vhost-user-gpu");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("Option parsing failed: %s\n", error->message);
        exit(EXIT_FAILURE);
    }
    g_option_context_free(context);

    if (opt_print_caps) {
        g_print("{\n");
        g_print("  \"type\": \"gpu\",\n");
        g_print("  \"features\": [\n");
        g_print("    \"render-node\",\n");
        g_print("    \"virgl\"\n");
        g_print("  ]\n");
        g_print("}\n");
        exit(EXIT_SUCCESS);
    }

    g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
    if (opt_render_node && g.drm_rnode_fd == -1) {
        g_printerr("Failed to open DRM rendernode.\n");
        exit(EXIT_FAILURE);
    }

    vugbm_device_init(&g.gdev, g.drm_rnode_fd);

    if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
        g_printerr("Please specify either --fd or --socket-path\n");
        exit(EXIT_FAILURE);
    }

    if (opt_socket_path) {
        int lsock = unix_listen(opt_socket_path, &error_fatal);
        if (lsock < 0) {
            g_printerr("Failed to listen on %s.\n", opt_socket_path);
            exit(EXIT_FAILURE);
        }
        fd = accept(lsock, NULL, NULL);
        close(lsock);
    } else {
        fd = opt_fdnum;
    }
    if (fd == -1) {
        g_printerr("Invalid vhost-user socket.\n");
        exit(EXIT_FAILURE);
    }

    if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
        g_printerr("Failed to initialize libvhost-user-glib.\n");
        exit(EXIT_FAILURE);
    }

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

    vg_destroy(&g);
    if (g.drm_rnode_fd >= 0) {
        close(g.drm_rnode_fd);
    }

    return 0;
}