1 /* 2 * Virtio GPU Device 3 * 4 * Copyright Red Hat, Inc. 2013-2014 5 * 6 * Authors: 7 * Dave Airlie <airlied@redhat.com> 8 * Gerd Hoffmann <kraxel@redhat.com> 9 * 10 * This work is licensed under the terms of the GNU GPL, version 2 or later. 11 * See the COPYING file in the top-level directory. 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qemu/units.h" 16 #include "qemu/iov.h" 17 #include "system/cpus.h" 18 #include "ui/console.h" 19 #include "ui/rect.h" 20 #include "trace.h" 21 #include "system/dma.h" 22 #include "system/system.h" 23 #include "hw/virtio/virtio.h" 24 #include "migration/qemu-file-types.h" 25 #include "hw/virtio/virtio-gpu.h" 26 #include "hw/virtio/virtio-gpu-bswap.h" 27 #include "hw/virtio/virtio-gpu-pixman.h" 28 #include "hw/virtio/virtio-bus.h" 29 #include "hw/qdev-properties.h" 30 #include "qemu/log.h" 31 #include "qemu/memfd.h" 32 #include "qemu/module.h" 33 #include "qapi/error.h" 34 #include "qemu/error-report.h" 35 36 #define VIRTIO_GPU_VM_VERSION 1 37 38 static struct virtio_gpu_simple_resource * 39 virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id, 40 bool require_backing, 41 const char *caller, uint32_t *error); 42 43 static void virtio_gpu_reset_bh(void *opaque); 44 45 void virtio_gpu_update_cursor_data(VirtIOGPU *g, 46 struct virtio_gpu_scanout *s, 47 uint32_t resource_id) 48 { 49 struct virtio_gpu_simple_resource *res; 50 uint32_t pixels; 51 void *data; 52 53 res = virtio_gpu_find_check_resource(g, resource_id, false, 54 __func__, NULL); 55 if (!res) { 56 return; 57 } 58 59 if (res->blob_size) { 60 if (res->blob_size < (s->current_cursor->width * 61 s->current_cursor->height * 4)) { 62 return; 63 } 64 data = res->blob; 65 } else { 66 if (pixman_image_get_width(res->image) != s->current_cursor->width || 67 pixman_image_get_height(res->image) != s->current_cursor->height) { 68 return; 69 } 70 data = pixman_image_get_data(res->image); 71 } 72 73 pixels = s->current_cursor->width * s->current_cursor->height; 74 memcpy(s->current_cursor->data, data, 75 pixels * sizeof(uint32_t)); 76 } 77 78 static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor) 79 { 80 struct virtio_gpu_scanout *s; 81 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); 82 bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR; 83 84 if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) { 85 return; 86 } 87 s = &g->parent_obj.scanout[cursor->pos.scanout_id]; 88 89 trace_virtio_gpu_update_cursor(cursor->pos.scanout_id, 90 cursor->pos.x, 91 cursor->pos.y, 92 move ? "move" : "update", 93 cursor->resource_id); 94 95 if (!move) { 96 if (!s->current_cursor) { 97 s->current_cursor = cursor_alloc(64, 64); 98 } 99 100 s->current_cursor->hot_x = cursor->hot_x; 101 s->current_cursor->hot_y = cursor->hot_y; 102 103 if (cursor->resource_id > 0) { 104 vgc->update_cursor_data(g, s, cursor->resource_id); 105 } 106 dpy_cursor_define(s->con, s->current_cursor); 107 108 s->cursor = *cursor; 109 } else { 110 s->cursor.pos.x = cursor->pos.x; 111 s->cursor.pos.y = cursor->pos.y; 112 } 113 dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, cursor->resource_id); 114 } 115 116 struct virtio_gpu_simple_resource * 117 virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id) 118 { 119 struct virtio_gpu_simple_resource *res; 120 121 QTAILQ_FOREACH(res, &g->reslist, next) { 122 if (res->resource_id == resource_id) { 123 return res; 124 } 125 } 126 return NULL; 127 } 128 129 static struct virtio_gpu_simple_resource * 130 virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id, 131 bool require_backing, 132 const char *caller, uint32_t *error) 133 { 134 struct virtio_gpu_simple_resource *res; 135 136 res = virtio_gpu_find_resource(g, resource_id); 137 if (!res) { 138 qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n", 139 caller, resource_id); 140 if (error) { 141 *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 142 } 143 return NULL; 144 } 145 146 if (require_backing) { 147 if (!res->iov || (!res->image && !res->blob)) { 148 qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n", 149 caller, resource_id); 150 if (error) { 151 *error = VIRTIO_GPU_RESP_ERR_UNSPEC; 152 } 153 return NULL; 154 } 155 } 156 157 return res; 158 } 159 160 void virtio_gpu_ctrl_response(VirtIOGPU *g, 161 struct virtio_gpu_ctrl_command *cmd, 162 struct virtio_gpu_ctrl_hdr *resp, 163 size_t resp_len) 164 { 165 size_t s; 166 167 if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) { 168 resp->flags |= VIRTIO_GPU_FLAG_FENCE; 169 resp->fence_id = cmd->cmd_hdr.fence_id; 170 resp->ctx_id = cmd->cmd_hdr.ctx_id; 171 } 172 virtio_gpu_ctrl_hdr_bswap(resp); 173 s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len); 174 if (s != resp_len) { 175 qemu_log_mask(LOG_GUEST_ERROR, 176 "%s: response size incorrect %zu vs %zu\n", 177 __func__, s, resp_len); 178 } 179 virtqueue_push(cmd->vq, &cmd->elem, s); 180 virtio_notify(VIRTIO_DEVICE(g), cmd->vq); 181 cmd->finished = true; 182 } 183 184 void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g, 185 struct virtio_gpu_ctrl_command *cmd, 186 enum virtio_gpu_ctrl_type type) 187 { 188 struct virtio_gpu_ctrl_hdr resp; 189 190 memset(&resp, 0, sizeof(resp)); 191 resp.type = type; 192 virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp)); 193 } 194 195 void virtio_gpu_get_display_info(VirtIOGPU *g, 196 struct virtio_gpu_ctrl_command *cmd) 197 { 198 struct virtio_gpu_resp_display_info display_info; 199 200 trace_virtio_gpu_cmd_get_display_info(); 201 memset(&display_info, 0, sizeof(display_info)); 202 display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; 203 virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info); 204 virtio_gpu_ctrl_response(g, cmd, &display_info.hdr, 205 sizeof(display_info)); 206 } 207 208 void virtio_gpu_get_edid(VirtIOGPU *g, 209 struct virtio_gpu_ctrl_command *cmd) 210 { 211 struct virtio_gpu_resp_edid edid; 212 struct virtio_gpu_cmd_get_edid get_edid; 213 VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); 214 215 VIRTIO_GPU_FILL_CMD(get_edid); 216 virtio_gpu_bswap_32(&get_edid, sizeof(get_edid)); 217 218 if (get_edid.scanout >= b->conf.max_outputs) { 219 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 220 return; 221 } 222 223 trace_virtio_gpu_cmd_get_edid(get_edid.scanout); 224 memset(&edid, 0, sizeof(edid)); 225 edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID; 226 virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid); 227 virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid)); 228 } 229 230 static uint32_t calc_image_hostmem(pixman_format_code_t pformat, 231 uint32_t width, uint32_t height) 232 { 233 /* Copied from pixman/pixman-bits-image.c, skip integer overflow check. 234 * pixman_image_create_bits will fail in case it overflow. 235 */ 236 237 int bpp = PIXMAN_FORMAT_BPP(pformat); 238 int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t); 239 return height * stride; 240 } 241 242 static void virtio_gpu_resource_create_2d(VirtIOGPU *g, 243 struct virtio_gpu_ctrl_command *cmd) 244 { 245 Error *err = NULL; 246 pixman_format_code_t pformat; 247 struct virtio_gpu_simple_resource *res; 248 struct virtio_gpu_resource_create_2d c2d; 249 250 VIRTIO_GPU_FILL_CMD(c2d); 251 virtio_gpu_bswap_32(&c2d, sizeof(c2d)); 252 trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format, 253 c2d.width, c2d.height); 254 255 if (c2d.resource_id == 0) { 256 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", 257 __func__); 258 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 259 return; 260 } 261 262 res = virtio_gpu_find_resource(g, c2d.resource_id); 263 if (res) { 264 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", 265 __func__, c2d.resource_id); 266 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 267 return; 268 } 269 270 res = g_new0(struct virtio_gpu_simple_resource, 1); 271 272 res->width = c2d.width; 273 res->height = c2d.height; 274 res->format = c2d.format; 275 res->resource_id = c2d.resource_id; 276 277 pformat = virtio_gpu_get_pixman_format(c2d.format); 278 if (!pformat) { 279 qemu_log_mask(LOG_GUEST_ERROR, 280 "%s: host couldn't handle guest format %d\n", 281 __func__, c2d.format); 282 g_free(res); 283 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 284 return; 285 } 286 287 res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height); 288 if (res->hostmem + g->hostmem < g->conf_max_hostmem) { 289 if (!qemu_pixman_image_new_shareable( 290 &res->image, 291 &res->share_handle, 292 "virtio-gpu res", 293 pformat, 294 c2d.width, 295 c2d.height, 296 c2d.height ? res->hostmem / c2d.height : 0, 297 &err)) { 298 warn_report_err(err); 299 goto end; 300 } 301 } 302 303 end: 304 if (!res->image) { 305 qemu_log_mask(LOG_GUEST_ERROR, 306 "%s: resource creation failed %d %d %d\n", 307 __func__, c2d.resource_id, c2d.width, c2d.height); 308 g_free(res); 309 cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; 310 return; 311 } 312 313 QTAILQ_INSERT_HEAD(&g->reslist, res, next); 314 g->hostmem += res->hostmem; 315 } 316 317 static void virtio_gpu_resource_create_blob(VirtIOGPU *g, 318 struct virtio_gpu_ctrl_command *cmd) 319 { 320 struct virtio_gpu_simple_resource *res; 321 struct virtio_gpu_resource_create_blob cblob; 322 int ret; 323 324 VIRTIO_GPU_FILL_CMD(cblob); 325 virtio_gpu_create_blob_bswap(&cblob); 326 trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); 327 328 if (cblob.resource_id == 0) { 329 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", 330 __func__); 331 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 332 return; 333 } 334 335 if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST && 336 cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) { 337 qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n", 338 __func__); 339 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 340 return; 341 } 342 343 if (virtio_gpu_find_resource(g, cblob.resource_id)) { 344 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", 345 __func__, cblob.resource_id); 346 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 347 return; 348 } 349 350 res = g_new0(struct virtio_gpu_simple_resource, 1); 351 res->resource_id = cblob.resource_id; 352 res->blob_size = cblob.size; 353 354 ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), 355 cmd, &res->addrs, &res->iov, 356 &res->iov_cnt); 357 if (ret != 0) { 358 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; 359 g_free(res); 360 return; 361 } 362 363 virtio_gpu_init_udmabuf(res); 364 QTAILQ_INSERT_HEAD(&g->reslist, res, next); 365 } 366 367 void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id) 368 { 369 struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id]; 370 struct virtio_gpu_simple_resource *res; 371 372 if (scanout->resource_id == 0) { 373 return; 374 } 375 376 res = virtio_gpu_find_resource(g, scanout->resource_id); 377 if (res) { 378 res->scanout_bitmask &= ~(1 << scanout_id); 379 } 380 381 dpy_gfx_replace_surface(scanout->con, NULL); 382 scanout->resource_id = 0; 383 scanout->ds = NULL; 384 scanout->width = 0; 385 scanout->height = 0; 386 } 387 388 static void virtio_gpu_resource_destroy(VirtIOGPU *g, 389 struct virtio_gpu_simple_resource *res, 390 Error **errp) 391 { 392 int i; 393 394 if (res->scanout_bitmask) { 395 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { 396 if (res->scanout_bitmask & (1 << i)) { 397 virtio_gpu_disable_scanout(g, i); 398 } 399 } 400 } 401 402 qemu_pixman_image_unref(res->image); 403 virtio_gpu_cleanup_mapping(g, res); 404 QTAILQ_REMOVE(&g->reslist, res, next); 405 g->hostmem -= res->hostmem; 406 g_free(res); 407 } 408 409 static void virtio_gpu_resource_unref(VirtIOGPU *g, 410 struct virtio_gpu_ctrl_command *cmd) 411 { 412 struct virtio_gpu_simple_resource *res; 413 struct virtio_gpu_resource_unref unref; 414 415 VIRTIO_GPU_FILL_CMD(unref); 416 virtio_gpu_bswap_32(&unref, sizeof(unref)); 417 trace_virtio_gpu_cmd_res_unref(unref.resource_id); 418 419 res = virtio_gpu_find_resource(g, unref.resource_id); 420 if (!res) { 421 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 422 __func__, unref.resource_id); 423 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 424 return; 425 } 426 /* 427 * virtio_gpu_resource_destroy does not set any errors, so pass a NULL errp 428 * to ignore them. 429 */ 430 virtio_gpu_resource_destroy(g, res, NULL); 431 } 432 433 static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g, 434 struct virtio_gpu_ctrl_command *cmd) 435 { 436 struct virtio_gpu_simple_resource *res; 437 int h, bpp; 438 uint32_t src_offset, dst_offset, stride; 439 pixman_format_code_t format; 440 struct virtio_gpu_transfer_to_host_2d t2d; 441 void *img_data; 442 443 VIRTIO_GPU_FILL_CMD(t2d); 444 virtio_gpu_t2d_bswap(&t2d); 445 trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id); 446 447 res = virtio_gpu_find_check_resource(g, t2d.resource_id, true, 448 __func__, &cmd->error); 449 if (!res || res->blob) { 450 return; 451 } 452 453 if (t2d.r.x > res->width || 454 t2d.r.y > res->height || 455 t2d.r.width > res->width || 456 t2d.r.height > res->height || 457 t2d.r.x + t2d.r.width > res->width || 458 t2d.r.y + t2d.r.height > res->height) { 459 qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource" 460 " bounds for resource %d: %d %d %d %d vs %d %d\n", 461 __func__, t2d.resource_id, t2d.r.x, t2d.r.y, 462 t2d.r.width, t2d.r.height, res->width, res->height); 463 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 464 return; 465 } 466 467 format = pixman_image_get_format(res->image); 468 bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8); 469 stride = pixman_image_get_stride(res->image); 470 img_data = pixman_image_get_data(res->image); 471 472 if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) { 473 for (h = 0; h < t2d.r.height; h++) { 474 src_offset = t2d.offset + stride * h; 475 dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp); 476 477 iov_to_buf(res->iov, res->iov_cnt, src_offset, 478 (uint8_t *)img_data + dst_offset, 479 t2d.r.width * bpp); 480 } 481 } else { 482 src_offset = t2d.offset; 483 dst_offset = t2d.r.y * stride + t2d.r.x * bpp; 484 iov_to_buf(res->iov, res->iov_cnt, src_offset, 485 (uint8_t *)img_data + dst_offset, 486 stride * t2d.r.height); 487 } 488 } 489 490 static void virtio_gpu_resource_flush(VirtIOGPU *g, 491 struct virtio_gpu_ctrl_command *cmd) 492 { 493 struct virtio_gpu_simple_resource *res; 494 struct virtio_gpu_resource_flush rf; 495 struct virtio_gpu_scanout *scanout; 496 QemuRect flush_rect; 497 bool within_bounds = false; 498 bool update_submitted = false; 499 int i; 500 501 VIRTIO_GPU_FILL_CMD(rf); 502 virtio_gpu_bswap_32(&rf, sizeof(rf)); 503 trace_virtio_gpu_cmd_res_flush(rf.resource_id, 504 rf.r.width, rf.r.height, rf.r.x, rf.r.y); 505 506 res = virtio_gpu_find_check_resource(g, rf.resource_id, false, 507 __func__, &cmd->error); 508 if (!res) { 509 return; 510 } 511 512 if (res->blob) { 513 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { 514 scanout = &g->parent_obj.scanout[i]; 515 if (scanout->resource_id == res->resource_id && 516 rf.r.x < scanout->x + scanout->width && 517 rf.r.x + rf.r.width >= scanout->x && 518 rf.r.y < scanout->y + scanout->height && 519 rf.r.y + rf.r.height >= scanout->y) { 520 within_bounds = true; 521 522 if (console_has_gl(scanout->con)) { 523 dpy_gl_update(scanout->con, 0, 0, scanout->width, 524 scanout->height); 525 update_submitted = true; 526 } 527 } 528 } 529 530 if (update_submitted) { 531 return; 532 } 533 if (!within_bounds) { 534 qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts" 535 " bounds for flush %d: %d %d %d %d\n", 536 __func__, rf.resource_id, rf.r.x, rf.r.y, 537 rf.r.width, rf.r.height); 538 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 539 return; 540 } 541 } 542 543 if (!res->blob && 544 (rf.r.x > res->width || 545 rf.r.y > res->height || 546 rf.r.width > res->width || 547 rf.r.height > res->height || 548 rf.r.x + rf.r.width > res->width || 549 rf.r.y + rf.r.height > res->height)) { 550 qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource" 551 " bounds for resource %d: %d %d %d %d vs %d %d\n", 552 __func__, rf.resource_id, rf.r.x, rf.r.y, 553 rf.r.width, rf.r.height, res->width, res->height); 554 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 555 return; 556 } 557 558 qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height); 559 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { 560 QemuRect rect; 561 562 if (!(res->scanout_bitmask & (1 << i))) { 563 continue; 564 } 565 scanout = &g->parent_obj.scanout[i]; 566 567 qemu_rect_init(&rect, scanout->x, scanout->y, 568 scanout->width, scanout->height); 569 570 /* work out the area we need to update for each console */ 571 if (qemu_rect_intersect(&flush_rect, &rect, &rect)) { 572 qemu_rect_translate(&rect, -scanout->x, -scanout->y); 573 dpy_gfx_update(g->parent_obj.scanout[i].con, 574 rect.x, rect.y, rect.width, rect.height); 575 } 576 } 577 } 578 579 static void virtio_unref_resource(pixman_image_t *image, void *data) 580 { 581 pixman_image_unref(data); 582 } 583 584 void virtio_gpu_update_scanout(VirtIOGPU *g, 585 uint32_t scanout_id, 586 struct virtio_gpu_simple_resource *res, 587 struct virtio_gpu_framebuffer *fb, 588 struct virtio_gpu_rect *r) 589 { 590 struct virtio_gpu_simple_resource *ores; 591 struct virtio_gpu_scanout *scanout; 592 593 scanout = &g->parent_obj.scanout[scanout_id]; 594 ores = virtio_gpu_find_resource(g, scanout->resource_id); 595 if (ores) { 596 ores->scanout_bitmask &= ~(1 << scanout_id); 597 } 598 599 res->scanout_bitmask |= (1 << scanout_id); 600 scanout->resource_id = res->resource_id; 601 scanout->x = r->x; 602 scanout->y = r->y; 603 scanout->width = r->width; 604 scanout->height = r->height; 605 scanout->fb = *fb; 606 } 607 608 static bool virtio_gpu_do_set_scanout(VirtIOGPU *g, 609 uint32_t scanout_id, 610 struct virtio_gpu_framebuffer *fb, 611 struct virtio_gpu_simple_resource *res, 612 struct virtio_gpu_rect *r, 613 uint32_t *error) 614 { 615 struct virtio_gpu_scanout *scanout; 616 uint8_t *data; 617 618 scanout = &g->parent_obj.scanout[scanout_id]; 619 620 if (r->x > fb->width || 621 r->y > fb->height || 622 r->width < 16 || 623 r->height < 16 || 624 r->width > fb->width || 625 r->height > fb->height || 626 r->x + r->width > fb->width || 627 r->y + r->height > fb->height) { 628 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for" 629 " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n", 630 __func__, scanout_id, res->resource_id, 631 r->x, r->y, r->width, r->height, 632 fb->width, fb->height); 633 *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 634 return false; 635 } 636 637 g->parent_obj.enable = 1; 638 639 if (res->blob) { 640 if (console_has_gl(scanout->con)) { 641 if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) { 642 virtio_gpu_update_scanout(g, scanout_id, res, fb, r); 643 } else { 644 *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; 645 return false; 646 } 647 return true; 648 } 649 650 data = res->blob; 651 } else { 652 data = (uint8_t *)pixman_image_get_data(res->image); 653 } 654 655 /* create a surface for this scanout */ 656 if ((res->blob && !console_has_gl(scanout->con)) || 657 !scanout->ds || 658 surface_data(scanout->ds) != data + fb->offset || 659 scanout->width != r->width || 660 scanout->height != r->height) { 661 pixman_image_t *rect; 662 void *ptr = data + fb->offset; 663 rect = pixman_image_create_bits(fb->format, r->width, r->height, 664 ptr, fb->stride); 665 666 if (res->image) { 667 pixman_image_ref(res->image); 668 pixman_image_set_destroy_function(rect, virtio_unref_resource, 669 res->image); 670 } 671 672 /* realloc the surface ptr */ 673 scanout->ds = qemu_create_displaysurface_pixman(rect); 674 qemu_displaysurface_set_share_handle(scanout->ds, res->share_handle, fb->offset); 675 676 pixman_image_unref(rect); 677 dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con, 678 scanout->ds); 679 } 680 681 virtio_gpu_update_scanout(g, scanout_id, res, fb, r); 682 return true; 683 } 684 685 static void virtio_gpu_set_scanout(VirtIOGPU *g, 686 struct virtio_gpu_ctrl_command *cmd) 687 { 688 struct virtio_gpu_simple_resource *res; 689 struct virtio_gpu_framebuffer fb = { 0 }; 690 struct virtio_gpu_set_scanout ss; 691 692 VIRTIO_GPU_FILL_CMD(ss); 693 virtio_gpu_bswap_32(&ss, sizeof(ss)); 694 trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, 695 ss.r.width, ss.r.height, ss.r.x, ss.r.y); 696 697 if (ss.scanout_id >= g->parent_obj.conf.max_outputs) { 698 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", 699 __func__, ss.scanout_id); 700 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; 701 return; 702 } 703 704 if (ss.resource_id == 0) { 705 virtio_gpu_disable_scanout(g, ss.scanout_id); 706 return; 707 } 708 709 res = virtio_gpu_find_check_resource(g, ss.resource_id, true, 710 __func__, &cmd->error); 711 if (!res) { 712 return; 713 } 714 715 fb.format = pixman_image_get_format(res->image); 716 fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8); 717 fb.width = pixman_image_get_width(res->image); 718 fb.height = pixman_image_get_height(res->image); 719 fb.stride = pixman_image_get_stride(res->image); 720 fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride; 721 722 virtio_gpu_do_set_scanout(g, ss.scanout_id, 723 &fb, res, &ss.r, &cmd->error); 724 } 725 726 bool virtio_gpu_scanout_blob_to_fb(struct virtio_gpu_framebuffer *fb, 727 struct virtio_gpu_set_scanout_blob *ss, 728 uint64_t blob_size) 729 { 730 uint64_t fbend; 731 732 fb->format = virtio_gpu_get_pixman_format(ss->format); 733 if (!fb->format) { 734 qemu_log_mask(LOG_GUEST_ERROR, 735 "%s: host couldn't handle guest format %d\n", 736 __func__, ss->format); 737 return false; 738 } 739 740 fb->bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb->format), 8); 741 fb->width = ss->width; 742 fb->height = ss->height; 743 fb->stride = ss->strides[0]; 744 fb->offset = ss->offsets[0] + ss->r.x * fb->bytes_pp + ss->r.y * fb->stride; 745 746 fbend = fb->offset; 747 fbend += (uint64_t) fb->stride * ss->r.height; 748 749 if (fbend > blob_size) { 750 qemu_log_mask(LOG_GUEST_ERROR, 751 "%s: fb end out of range\n", 752 __func__); 753 return false; 754 } 755 756 return true; 757 } 758 759 760 761 static void virtio_gpu_set_scanout_blob(VirtIOGPU *g, 762 struct virtio_gpu_ctrl_command *cmd) 763 { 764 struct virtio_gpu_simple_resource *res; 765 struct virtio_gpu_framebuffer fb = { 0 }; 766 struct virtio_gpu_set_scanout_blob ss; 767 768 VIRTIO_GPU_FILL_CMD(ss); 769 virtio_gpu_scanout_blob_bswap(&ss); 770 trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id, 771 ss.r.width, ss.r.height, ss.r.x, 772 ss.r.y); 773 774 if (ss.scanout_id >= g->parent_obj.conf.max_outputs) { 775 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", 776 __func__, ss.scanout_id); 777 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; 778 return; 779 } 780 781 if (ss.resource_id == 0) { 782 virtio_gpu_disable_scanout(g, ss.scanout_id); 783 return; 784 } 785 786 res = virtio_gpu_find_check_resource(g, ss.resource_id, true, 787 __func__, &cmd->error); 788 if (!res) { 789 return; 790 } 791 792 if (!virtio_gpu_scanout_blob_to_fb(&fb, &ss, res->blob_size)) { 793 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 794 return; 795 } 796 797 virtio_gpu_do_set_scanout(g, ss.scanout_id, 798 &fb, res, &ss.r, &cmd->error); 799 } 800 801 int virtio_gpu_create_mapping_iov(VirtIOGPU *g, 802 uint32_t nr_entries, uint32_t offset, 803 struct virtio_gpu_ctrl_command *cmd, 804 uint64_t **addr, struct iovec **iov, 805 uint32_t *niov) 806 { 807 struct virtio_gpu_mem_entry *ents; 808 size_t esize, s; 809 int e, v; 810 811 if (nr_entries > 16384) { 812 qemu_log_mask(LOG_GUEST_ERROR, 813 "%s: nr_entries is too big (%d > 16384)\n", 814 __func__, nr_entries); 815 return -1; 816 } 817 818 esize = sizeof(*ents) * nr_entries; 819 ents = g_malloc(esize); 820 s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 821 offset, ents, esize); 822 if (s != esize) { 823 qemu_log_mask(LOG_GUEST_ERROR, 824 "%s: command data size incorrect %zu vs %zu\n", 825 __func__, s, esize); 826 g_free(ents); 827 return -1; 828 } 829 830 *iov = NULL; 831 if (addr) { 832 *addr = NULL; 833 } 834 for (e = 0, v = 0; e < nr_entries; e++) { 835 uint64_t a = le64_to_cpu(ents[e].addr); 836 uint32_t l = le32_to_cpu(ents[e].length); 837 hwaddr len; 838 void *map; 839 840 do { 841 len = l; 842 map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len, 843 DMA_DIRECTION_TO_DEVICE, 844 MEMTXATTRS_UNSPECIFIED); 845 if (!map) { 846 qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for" 847 " element %d\n", __func__, e); 848 virtio_gpu_cleanup_mapping_iov(g, *iov, v); 849 g_free(ents); 850 *iov = NULL; 851 if (addr) { 852 g_free(*addr); 853 *addr = NULL; 854 } 855 return -1; 856 } 857 858 if (!(v % 16)) { 859 *iov = g_renew(struct iovec, *iov, v + 16); 860 if (addr) { 861 *addr = g_renew(uint64_t, *addr, v + 16); 862 } 863 } 864 (*iov)[v].iov_base = map; 865 (*iov)[v].iov_len = len; 866 if (addr) { 867 (*addr)[v] = a; 868 } 869 870 a += len; 871 l -= len; 872 v += 1; 873 } while (l > 0); 874 } 875 *niov = v; 876 877 g_free(ents); 878 return 0; 879 } 880 881 void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g, 882 struct iovec *iov, uint32_t count) 883 { 884 int i; 885 886 for (i = 0; i < count; i++) { 887 dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, 888 iov[i].iov_base, iov[i].iov_len, 889 DMA_DIRECTION_TO_DEVICE, 890 iov[i].iov_len); 891 } 892 g_free(iov); 893 } 894 895 void virtio_gpu_cleanup_mapping(VirtIOGPU *g, 896 struct virtio_gpu_simple_resource *res) 897 { 898 virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); 899 res->iov = NULL; 900 res->iov_cnt = 0; 901 g_free(res->addrs); 902 res->addrs = NULL; 903 904 if (res->blob) { 905 virtio_gpu_fini_udmabuf(res); 906 } 907 } 908 909 static void 910 virtio_gpu_resource_attach_backing(VirtIOGPU *g, 911 struct virtio_gpu_ctrl_command *cmd) 912 { 913 struct virtio_gpu_simple_resource *res; 914 struct virtio_gpu_resource_attach_backing ab; 915 int ret; 916 917 VIRTIO_GPU_FILL_CMD(ab); 918 virtio_gpu_bswap_32(&ab, sizeof(ab)); 919 trace_virtio_gpu_cmd_res_back_attach(ab.resource_id); 920 921 res = virtio_gpu_find_resource(g, ab.resource_id); 922 if (!res) { 923 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", 924 __func__, ab.resource_id); 925 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; 926 return; 927 } 928 929 if (res->iov) { 930 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; 931 return; 932 } 933 934 ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd, 935 &res->addrs, &res->iov, &res->iov_cnt); 936 if (ret != 0) { 937 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; 938 return; 939 } 940 } 941 942 static void 943 virtio_gpu_resource_detach_backing(VirtIOGPU *g, 944 struct virtio_gpu_ctrl_command *cmd) 945 { 946 struct virtio_gpu_simple_resource *res; 947 struct virtio_gpu_resource_detach_backing detach; 948 949 VIRTIO_GPU_FILL_CMD(detach); 950 virtio_gpu_bswap_32(&detach, sizeof(detach)); 951 trace_virtio_gpu_cmd_res_back_detach(detach.resource_id); 952 953 res = virtio_gpu_find_check_resource(g, detach.resource_id, true, 954 __func__, &cmd->error); 955 if (!res) { 956 return; 957 } 958 virtio_gpu_cleanup_mapping(g, res); 959 } 960 961 void virtio_gpu_simple_process_cmd(VirtIOGPU *g, 962 struct virtio_gpu_ctrl_command *cmd) 963 { 964 VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr); 965 virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr); 966 967 switch (cmd->cmd_hdr.type) { 968 case VIRTIO_GPU_CMD_GET_DISPLAY_INFO: 969 virtio_gpu_get_display_info(g, cmd); 970 break; 971 case VIRTIO_GPU_CMD_GET_EDID: 972 virtio_gpu_get_edid(g, cmd); 973 break; 974 case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: 975 virtio_gpu_resource_create_2d(g, cmd); 976 break; 977 case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: 978 if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) { 979 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 980 break; 981 } 982 virtio_gpu_resource_create_blob(g, cmd); 983 break; 984 case VIRTIO_GPU_CMD_RESOURCE_UNREF: 985 virtio_gpu_resource_unref(g, cmd); 986 break; 987 case VIRTIO_GPU_CMD_RESOURCE_FLUSH: 988 virtio_gpu_resource_flush(g, cmd); 989 break; 990 case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: 991 virtio_gpu_transfer_to_host_2d(g, cmd); 992 break; 993 case VIRTIO_GPU_CMD_SET_SCANOUT: 994 virtio_gpu_set_scanout(g, cmd); 995 break; 996 case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB: 997 if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) { 998 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 999 break; 1000 } 1001 virtio_gpu_set_scanout_blob(g, cmd); 1002 break; 1003 case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: 1004 virtio_gpu_resource_attach_backing(g, cmd); 1005 break; 1006 case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: 1007 virtio_gpu_resource_detach_backing(g, cmd); 1008 break; 1009 default: 1010 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; 1011 break; 1012 } 1013 if (!cmd->finished) { 1014 if (!g->parent_obj.renderer_blocked) { 1015 virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error : 1016 VIRTIO_GPU_RESP_OK_NODATA); 1017 } 1018 } 1019 } 1020 1021 static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq) 1022 { 1023 VirtIOGPU *g = VIRTIO_GPU(vdev); 1024 qemu_bh_schedule(g->ctrl_bh); 1025 } 1026 1027 static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq) 1028 { 1029 VirtIOGPU *g = VIRTIO_GPU(vdev); 1030 qemu_bh_schedule(g->cursor_bh); 1031 } 1032 1033 void virtio_gpu_process_cmdq(VirtIOGPU *g) 1034 { 1035 struct virtio_gpu_ctrl_command *cmd; 1036 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); 1037 1038 if (g->processing_cmdq) { 1039 return; 1040 } 1041 g->processing_cmdq = true; 1042 while (!QTAILQ_EMPTY(&g->cmdq)) { 1043 cmd = QTAILQ_FIRST(&g->cmdq); 1044 1045 if (g->parent_obj.renderer_blocked) { 1046 break; 1047 } 1048 1049 /* process command */ 1050 vgc->process_cmd(g, cmd); 1051 1052 /* command suspended */ 1053 if (!cmd->finished && !(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) { 1054 trace_virtio_gpu_cmd_suspended(cmd->cmd_hdr.type); 1055 break; 1056 } 1057 1058 QTAILQ_REMOVE(&g->cmdq, cmd, next); 1059 if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { 1060 g->stats.requests++; 1061 } 1062 1063 if (!cmd->finished) { 1064 QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next); 1065 g->inflight++; 1066 if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { 1067 if (g->stats.max_inflight < g->inflight) { 1068 g->stats.max_inflight = g->inflight; 1069 } 1070 trace_virtio_gpu_inc_inflight_fences(g->inflight); 1071 } 1072 } else { 1073 g_free(cmd); 1074 } 1075 } 1076 g->processing_cmdq = false; 1077 } 1078 1079 static void virtio_gpu_process_fenceq(VirtIOGPU *g) 1080 { 1081 struct virtio_gpu_ctrl_command *cmd, *tmp; 1082 1083 QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) { 1084 trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id); 1085 virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA); 1086 QTAILQ_REMOVE(&g->fenceq, cmd, next); 1087 g_free(cmd); 1088 g->inflight--; 1089 if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { 1090 trace_virtio_gpu_dec_inflight_fences(g->inflight); 1091 } 1092 } 1093 } 1094 1095 static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b) 1096 { 1097 VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj); 1098 1099 virtio_gpu_process_fenceq(g); 1100 virtio_gpu_process_cmdq(g); 1101 } 1102 1103 static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) 1104 { 1105 VirtIOGPU *g = VIRTIO_GPU(vdev); 1106 struct virtio_gpu_ctrl_command *cmd; 1107 1108 if (!virtio_queue_ready(vq)) { 1109 return; 1110 } 1111 1112 cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command)); 1113 while (cmd) { 1114 cmd->vq = vq; 1115 cmd->error = 0; 1116 cmd->finished = false; 1117 QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next); 1118 cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command)); 1119 } 1120 1121 virtio_gpu_process_cmdq(g); 1122 } 1123 1124 static void virtio_gpu_ctrl_bh(void *opaque) 1125 { 1126 VirtIOGPU *g = opaque; 1127 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); 1128 1129 vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq); 1130 } 1131 1132 static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq) 1133 { 1134 VirtIOGPU *g = VIRTIO_GPU(vdev); 1135 VirtQueueElement *elem; 1136 size_t s; 1137 struct virtio_gpu_update_cursor cursor_info; 1138 1139 if (!virtio_queue_ready(vq)) { 1140 return; 1141 } 1142 for (;;) { 1143 elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); 1144 if (!elem) { 1145 break; 1146 } 1147 1148 s = iov_to_buf(elem->out_sg, elem->out_num, 0, 1149 &cursor_info, sizeof(cursor_info)); 1150 if (s != sizeof(cursor_info)) { 1151 qemu_log_mask(LOG_GUEST_ERROR, 1152 "%s: cursor size incorrect %zu vs %zu\n", 1153 __func__, s, sizeof(cursor_info)); 1154 } else { 1155 virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info)); 1156 update_cursor(g, &cursor_info); 1157 } 1158 virtqueue_push(vq, elem, 0); 1159 virtio_notify(vdev, vq); 1160 g_free(elem); 1161 } 1162 } 1163 1164 static void virtio_gpu_cursor_bh(void *opaque) 1165 { 1166 VirtIOGPU *g = opaque; 1167 virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq); 1168 } 1169 1170 static bool scanout_vmstate_after_v2(void *opaque, int version) 1171 { 1172 struct VirtIOGPUBase *base = container_of(opaque, VirtIOGPUBase, scanout); 1173 struct VirtIOGPU *gpu = container_of(base, VirtIOGPU, parent_obj); 1174 1175 return gpu->scanout_vmstate_version >= 2; 1176 } 1177 1178 static const VMStateDescription vmstate_virtio_gpu_scanout = { 1179 .name = "virtio-gpu-one-scanout", 1180 .version_id = 1, 1181 .fields = (const VMStateField[]) { 1182 VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout), 1183 VMSTATE_UINT32(width, struct virtio_gpu_scanout), 1184 VMSTATE_UINT32(height, struct virtio_gpu_scanout), 1185 VMSTATE_INT32(x, struct virtio_gpu_scanout), 1186 VMSTATE_INT32(y, struct virtio_gpu_scanout), 1187 VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout), 1188 VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout), 1189 VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout), 1190 VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout), 1191 VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout), 1192 VMSTATE_UINT32_TEST(fb.format, struct virtio_gpu_scanout, 1193 scanout_vmstate_after_v2), 1194 VMSTATE_UINT32_TEST(fb.bytes_pp, struct virtio_gpu_scanout, 1195 scanout_vmstate_after_v2), 1196 VMSTATE_UINT32_TEST(fb.width, struct virtio_gpu_scanout, 1197 scanout_vmstate_after_v2), 1198 VMSTATE_UINT32_TEST(fb.height, struct virtio_gpu_scanout, 1199 scanout_vmstate_after_v2), 1200 VMSTATE_UINT32_TEST(fb.stride, struct virtio_gpu_scanout, 1201 scanout_vmstate_after_v2), 1202 VMSTATE_UINT32_TEST(fb.offset, struct virtio_gpu_scanout, 1203 scanout_vmstate_after_v2), 1204 VMSTATE_END_OF_LIST() 1205 }, 1206 }; 1207 1208 static const VMStateDescription vmstate_virtio_gpu_scanouts = { 1209 .name = "virtio-gpu-scanouts", 1210 .version_id = 1, 1211 .fields = (const VMStateField[]) { 1212 VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU), 1213 VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs, 1214 struct VirtIOGPU, NULL), 1215 VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU, 1216 parent_obj.conf.max_outputs, 1, 1217 vmstate_virtio_gpu_scanout, 1218 struct virtio_gpu_scanout), 1219 VMSTATE_END_OF_LIST() 1220 }, 1221 }; 1222 1223 static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size, 1224 const VMStateField *field, JSONWriter *vmdesc) 1225 { 1226 VirtIOGPU *g = opaque; 1227 struct virtio_gpu_simple_resource *res; 1228 Error *err = NULL; 1229 int i, ret; 1230 1231 /* in 2d mode we should never find unprocessed commands here */ 1232 assert(QTAILQ_EMPTY(&g->cmdq)); 1233 1234 QTAILQ_FOREACH(res, &g->reslist, next) { 1235 if (res->blob_size) { 1236 continue; 1237 } 1238 qemu_put_be32(f, res->resource_id); 1239 qemu_put_be32(f, res->width); 1240 qemu_put_be32(f, res->height); 1241 qemu_put_be32(f, res->format); 1242 qemu_put_be32(f, res->iov_cnt); 1243 for (i = 0; i < res->iov_cnt; i++) { 1244 qemu_put_be64(f, res->addrs[i]); 1245 qemu_put_be32(f, res->iov[i].iov_len); 1246 } 1247 qemu_put_buffer(f, (void *)pixman_image_get_data(res->image), 1248 pixman_image_get_stride(res->image) * res->height); 1249 } 1250 qemu_put_be32(f, 0); /* end of list */ 1251 1252 ret = vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL, 1253 &err); 1254 if (ret < 0) { 1255 error_report_err(err); 1256 } 1257 return ret; 1258 } 1259 1260 static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g, 1261 struct virtio_gpu_simple_resource *res) 1262 { 1263 int i; 1264 1265 for (i = 0; i < res->iov_cnt; i++) { 1266 hwaddr len = res->iov[i].iov_len; 1267 res->iov[i].iov_base = 1268 dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len, 1269 DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED); 1270 1271 if (!res->iov[i].iov_base || len != res->iov[i].iov_len) { 1272 /* Clean up the half-a-mapping we just created... */ 1273 if (res->iov[i].iov_base) { 1274 dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base, 1275 len, DMA_DIRECTION_TO_DEVICE, 0); 1276 } 1277 /* ...and the mappings for previous loop iterations */ 1278 res->iov_cnt = i; 1279 virtio_gpu_cleanup_mapping(g, res); 1280 return false; 1281 } 1282 } 1283 1284 QTAILQ_INSERT_HEAD(&g->reslist, res, next); 1285 g->hostmem += res->hostmem; 1286 return true; 1287 } 1288 1289 static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, 1290 const VMStateField *field) 1291 { 1292 VirtIOGPU *g = opaque; 1293 Error *err = NULL; 1294 struct virtio_gpu_simple_resource *res; 1295 uint32_t resource_id, pformat; 1296 int i, ret; 1297 1298 g->hostmem = 0; 1299 1300 resource_id = qemu_get_be32(f); 1301 while (resource_id != 0) { 1302 res = virtio_gpu_find_resource(g, resource_id); 1303 if (res) { 1304 return -EINVAL; 1305 } 1306 1307 res = g_new0(struct virtio_gpu_simple_resource, 1); 1308 res->resource_id = resource_id; 1309 res->width = qemu_get_be32(f); 1310 res->height = qemu_get_be32(f); 1311 res->format = qemu_get_be32(f); 1312 res->iov_cnt = qemu_get_be32(f); 1313 1314 /* allocate */ 1315 pformat = virtio_gpu_get_pixman_format(res->format); 1316 if (!pformat) { 1317 g_free(res); 1318 return -EINVAL; 1319 } 1320 1321 res->hostmem = calc_image_hostmem(pformat, res->width, res->height); 1322 if (!qemu_pixman_image_new_shareable(&res->image, 1323 &res->share_handle, 1324 "virtio-gpu res", 1325 pformat, 1326 res->width, 1327 res->height, 1328 res->height ? res->hostmem / res->height : 0, 1329 &err)) { 1330 warn_report_err(err); 1331 g_free(res); 1332 return -EINVAL; 1333 } 1334 1335 res->addrs = g_new(uint64_t, res->iov_cnt); 1336 res->iov = g_new(struct iovec, res->iov_cnt); 1337 1338 /* read data */ 1339 for (i = 0; i < res->iov_cnt; i++) { 1340 res->addrs[i] = qemu_get_be64(f); 1341 res->iov[i].iov_len = qemu_get_be32(f); 1342 } 1343 qemu_get_buffer(f, (void *)pixman_image_get_data(res->image), 1344 pixman_image_get_stride(res->image) * res->height); 1345 1346 if (!virtio_gpu_load_restore_mapping(g, res)) { 1347 pixman_image_unref(res->image); 1348 g_free(res); 1349 return -EINVAL; 1350 } 1351 1352 resource_id = qemu_get_be32(f); 1353 } 1354 1355 /* load & apply scanout state */ 1356 ret = vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1, &err); 1357 if (ret < 0) { 1358 error_report_err(err); 1359 } 1360 return ret; 1361 } 1362 1363 static int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size, 1364 const VMStateField *field, JSONWriter *vmdesc) 1365 { 1366 VirtIOGPU *g = opaque; 1367 struct virtio_gpu_simple_resource *res; 1368 int i; 1369 1370 /* in 2d mode we should never find unprocessed commands here */ 1371 assert(QTAILQ_EMPTY(&g->cmdq)); 1372 1373 QTAILQ_FOREACH(res, &g->reslist, next) { 1374 if (!res->blob_size) { 1375 continue; 1376 } 1377 assert(!res->image); 1378 qemu_put_be32(f, res->resource_id); 1379 qemu_put_be32(f, res->blob_size); 1380 qemu_put_be32(f, res->iov_cnt); 1381 for (i = 0; i < res->iov_cnt; i++) { 1382 qemu_put_be64(f, res->addrs[i]); 1383 qemu_put_be32(f, res->iov[i].iov_len); 1384 } 1385 } 1386 qemu_put_be32(f, 0); /* end of list */ 1387 1388 return 0; 1389 } 1390 1391 static int virtio_gpu_blob_load(QEMUFile *f, void *opaque, size_t size, 1392 const VMStateField *field) 1393 { 1394 VirtIOGPU *g = opaque; 1395 struct virtio_gpu_simple_resource *res; 1396 uint32_t resource_id; 1397 int i; 1398 1399 resource_id = qemu_get_be32(f); 1400 while (resource_id != 0) { 1401 res = virtio_gpu_find_resource(g, resource_id); 1402 if (res) { 1403 return -EINVAL; 1404 } 1405 1406 res = g_new0(struct virtio_gpu_simple_resource, 1); 1407 res->resource_id = resource_id; 1408 res->blob_size = qemu_get_be32(f); 1409 res->iov_cnt = qemu_get_be32(f); 1410 res->addrs = g_new(uint64_t, res->iov_cnt); 1411 res->iov = g_new(struct iovec, res->iov_cnt); 1412 1413 /* read data */ 1414 for (i = 0; i < res->iov_cnt; i++) { 1415 res->addrs[i] = qemu_get_be64(f); 1416 res->iov[i].iov_len = qemu_get_be32(f); 1417 } 1418 1419 if (!virtio_gpu_load_restore_mapping(g, res)) { 1420 g_free(res); 1421 return -EINVAL; 1422 } 1423 1424 virtio_gpu_init_udmabuf(res); 1425 1426 resource_id = qemu_get_be32(f); 1427 } 1428 1429 return 0; 1430 } 1431 1432 static int virtio_gpu_post_load(void *opaque, int version_id) 1433 { 1434 VirtIOGPU *g = opaque; 1435 struct virtio_gpu_scanout *scanout; 1436 struct virtio_gpu_simple_resource *res; 1437 int i; 1438 1439 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { 1440 scanout = &g->parent_obj.scanout[i]; 1441 if (!scanout->resource_id) { 1442 continue; 1443 } 1444 1445 res = virtio_gpu_find_resource(g, scanout->resource_id); 1446 if (!res) { 1447 return -EINVAL; 1448 } 1449 1450 if (scanout->fb.format != 0) { 1451 uint32_t error = 0; 1452 struct virtio_gpu_rect r = { 1453 .x = scanout->x, 1454 .y = scanout->y, 1455 .width = scanout->width, 1456 .height = scanout->height 1457 }; 1458 1459 if (!virtio_gpu_do_set_scanout(g, i, &scanout->fb, res, &r, &error)) { 1460 return -EINVAL; 1461 } 1462 } else { 1463 /* legacy v1 migration support */ 1464 if (!res->image) { 1465 return -EINVAL; 1466 } 1467 scanout->ds = qemu_create_displaysurface_pixman(res->image); 1468 qemu_displaysurface_set_share_handle(scanout->ds, res->share_handle, 0); 1469 dpy_gfx_replace_surface(scanout->con, scanout->ds); 1470 } 1471 1472 dpy_gfx_update_full(scanout->con); 1473 if (scanout->cursor.resource_id) { 1474 update_cursor(g, &scanout->cursor); 1475 } 1476 res->scanout_bitmask |= (1 << i); 1477 } 1478 1479 return 0; 1480 } 1481 1482 void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) 1483 { 1484 VirtIODevice *vdev = VIRTIO_DEVICE(qdev); 1485 VirtIOGPU *g = VIRTIO_GPU(qdev); 1486 1487 if (virtio_gpu_blob_enabled(g->parent_obj.conf)) { 1488 if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) && 1489 !virtio_gpu_virgl_enabled(g->parent_obj.conf) && 1490 !virtio_gpu_have_udmabuf()) { 1491 error_setg(errp, "need rutabaga or udmabuf for blob resources"); 1492 return; 1493 } 1494 1495 #ifdef VIRGL_VERSION_MAJOR 1496 #if VIRGL_VERSION_MAJOR < 1 1497 if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) { 1498 error_setg(errp, "old virglrenderer, blob resources unsupported"); 1499 return; 1500 } 1501 #endif 1502 #endif 1503 } 1504 1505 if (virtio_gpu_venus_enabled(g->parent_obj.conf)) { 1506 #ifdef VIRGL_VERSION_MAJOR 1507 #if VIRGL_VERSION_MAJOR >= 1 1508 if (!virtio_gpu_blob_enabled(g->parent_obj.conf) || 1509 !virtio_gpu_hostmem_enabled(g->parent_obj.conf)) { 1510 error_setg(errp, "venus requires enabled blob and hostmem options"); 1511 return; 1512 } 1513 #else 1514 error_setg(errp, "old virglrenderer, venus unsupported"); 1515 return; 1516 #endif 1517 #endif 1518 } 1519 1520 if (!virtio_gpu_base_device_realize(qdev, 1521 virtio_gpu_handle_ctrl_cb, 1522 virtio_gpu_handle_cursor_cb, 1523 errp)) { 1524 return; 1525 } 1526 1527 g->ctrl_vq = virtio_get_queue(vdev, 0); 1528 g->cursor_vq = virtio_get_queue(vdev, 1); 1529 g->ctrl_bh = virtio_bh_new_guarded(qdev, virtio_gpu_ctrl_bh, g); 1530 g->cursor_bh = virtio_bh_new_guarded(qdev, virtio_gpu_cursor_bh, g); 1531 g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g); 1532 qemu_cond_init(&g->reset_cond); 1533 QTAILQ_INIT(&g->reslist); 1534 QTAILQ_INIT(&g->cmdq); 1535 QTAILQ_INIT(&g->fenceq); 1536 } 1537 1538 static void virtio_gpu_device_unrealize(DeviceState *qdev) 1539 { 1540 VirtIOGPU *g = VIRTIO_GPU(qdev); 1541 1542 g_clear_pointer(&g->ctrl_bh, qemu_bh_delete); 1543 g_clear_pointer(&g->cursor_bh, qemu_bh_delete); 1544 g_clear_pointer(&g->reset_bh, qemu_bh_delete); 1545 qemu_cond_destroy(&g->reset_cond); 1546 virtio_gpu_base_device_unrealize(qdev); 1547 } 1548 1549 static void virtio_gpu_reset_bh(void *opaque) 1550 { 1551 VirtIOGPU *g = VIRTIO_GPU(opaque); 1552 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); 1553 struct virtio_gpu_simple_resource *res, *tmp; 1554 uint32_t resource_id; 1555 Error *local_err = NULL; 1556 int i = 0; 1557 1558 QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) { 1559 resource_id = res->resource_id; 1560 vgc->resource_destroy(g, res, &local_err); 1561 if (local_err) { 1562 error_append_hint(&local_err, "%s: %s resource_destroy" 1563 "for resource_id = %"PRIu32" failed.\n", 1564 __func__, object_get_typename(OBJECT(g)), 1565 resource_id); 1566 /* error_report_err frees the error object for us */ 1567 error_report_err(local_err); 1568 local_err = NULL; 1569 } 1570 } 1571 1572 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { 1573 dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL); 1574 } 1575 1576 g->reset_finished = true; 1577 qemu_cond_signal(&g->reset_cond); 1578 } 1579 1580 void virtio_gpu_reset(VirtIODevice *vdev) 1581 { 1582 VirtIOGPU *g = VIRTIO_GPU(vdev); 1583 struct virtio_gpu_ctrl_command *cmd; 1584 1585 if (qemu_in_vcpu_thread()) { 1586 g->reset_finished = false; 1587 qemu_bh_schedule(g->reset_bh); 1588 while (!g->reset_finished) { 1589 qemu_cond_wait_bql(&g->reset_cond); 1590 } 1591 } else { 1592 aio_bh_call(g->reset_bh); 1593 } 1594 1595 while (!QTAILQ_EMPTY(&g->cmdq)) { 1596 cmd = QTAILQ_FIRST(&g->cmdq); 1597 QTAILQ_REMOVE(&g->cmdq, cmd, next); 1598 g_free(cmd); 1599 } 1600 1601 while (!QTAILQ_EMPTY(&g->fenceq)) { 1602 cmd = QTAILQ_FIRST(&g->fenceq); 1603 QTAILQ_REMOVE(&g->fenceq, cmd, next); 1604 g->inflight--; 1605 g_free(cmd); 1606 } 1607 1608 virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev)); 1609 } 1610 1611 static void 1612 virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config) 1613 { 1614 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); 1615 1616 memcpy(config, &g->virtio_config, sizeof(g->virtio_config)); 1617 } 1618 1619 static void 1620 virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config) 1621 { 1622 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); 1623 const struct virtio_gpu_config *vgconfig = 1624 (const struct virtio_gpu_config *)config; 1625 1626 if (vgconfig->events_clear) { 1627 g->virtio_config.events_read &= ~vgconfig->events_clear; 1628 } 1629 } 1630 1631 static bool virtio_gpu_blob_state_needed(void *opaque) 1632 { 1633 VirtIOGPU *g = VIRTIO_GPU(opaque); 1634 1635 return virtio_gpu_blob_enabled(g->parent_obj.conf); 1636 } 1637 1638 const VMStateDescription vmstate_virtio_gpu_blob_state = { 1639 .name = "virtio-gpu/blob", 1640 .minimum_version_id = VIRTIO_GPU_VM_VERSION, 1641 .version_id = VIRTIO_GPU_VM_VERSION, 1642 .needed = virtio_gpu_blob_state_needed, 1643 .fields = (const VMStateField[]){ 1644 { 1645 .name = "virtio-gpu/blob", 1646 .info = &(const VMStateInfo) { 1647 .name = "blob", 1648 .get = virtio_gpu_blob_load, 1649 .put = virtio_gpu_blob_save, 1650 }, 1651 .flags = VMS_SINGLE, 1652 } /* device */, 1653 VMSTATE_END_OF_LIST() 1654 }, 1655 }; 1656 1657 /* 1658 * For historical reasons virtio_gpu does not adhere to virtio migration 1659 * scheme as described in doc/virtio-migration.txt, in a sense that no 1660 * save/load callback are provided to the core. Instead the device data 1661 * is saved/loaded after the core data. 1662 * 1663 * Because of this we need a special vmsd. 1664 */ 1665 static const VMStateDescription vmstate_virtio_gpu = { 1666 .name = "virtio-gpu", 1667 .minimum_version_id = VIRTIO_GPU_VM_VERSION, 1668 .version_id = VIRTIO_GPU_VM_VERSION, 1669 .fields = (const VMStateField[]) { 1670 VMSTATE_VIRTIO_DEVICE /* core */, 1671 { 1672 .name = "virtio-gpu", 1673 .info = &(const VMStateInfo) { 1674 .name = "virtio-gpu", 1675 .get = virtio_gpu_load, 1676 .put = virtio_gpu_save, 1677 }, 1678 .flags = VMS_SINGLE, 1679 } /* device */, 1680 VMSTATE_END_OF_LIST() 1681 }, 1682 .subsections = (const VMStateDescription * const []) { 1683 &vmstate_virtio_gpu_blob_state, 1684 NULL 1685 }, 1686 .post_load = virtio_gpu_post_load, 1687 }; 1688 1689 static const Property virtio_gpu_properties[] = { 1690 VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf), 1691 DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem, 1692 256 * MiB), 1693 DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags, 1694 VIRTIO_GPU_FLAG_BLOB_ENABLED, false), 1695 DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0), 1696 DEFINE_PROP_UINT8("x-scanout-vmstate-version", VirtIOGPU, scanout_vmstate_version, 2), 1697 }; 1698 1699 static void virtio_gpu_class_init(ObjectClass *klass, const void *data) 1700 { 1701 DeviceClass *dc = DEVICE_CLASS(klass); 1702 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 1703 VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass); 1704 VirtIOGPUBaseClass *vgbc = &vgc->parent; 1705 1706 vgc->handle_ctrl = virtio_gpu_handle_ctrl; 1707 vgc->process_cmd = virtio_gpu_simple_process_cmd; 1708 vgc->update_cursor_data = virtio_gpu_update_cursor_data; 1709 vgc->resource_destroy = virtio_gpu_resource_destroy; 1710 vgbc->gl_flushed = virtio_gpu_handle_gl_flushed; 1711 1712 vdc->realize = virtio_gpu_device_realize; 1713 vdc->unrealize = virtio_gpu_device_unrealize; 1714 vdc->reset = virtio_gpu_reset; 1715 vdc->get_config = virtio_gpu_get_config; 1716 vdc->set_config = virtio_gpu_set_config; 1717 1718 dc->vmsd = &vmstate_virtio_gpu; 1719 device_class_set_props(dc, virtio_gpu_properties); 1720 } 1721 1722 static const TypeInfo virtio_gpu_info = { 1723 .name = TYPE_VIRTIO_GPU, 1724 .parent = TYPE_VIRTIO_GPU_BASE, 1725 .instance_size = sizeof(VirtIOGPU), 1726 .class_size = sizeof(VirtIOGPUClass), 1727 .class_init = virtio_gpu_class_init, 1728 }; 1729 module_obj(TYPE_VIRTIO_GPU); 1730 module_kconfig(VIRTIO_GPU); 1731 1732 static void virtio_register_types(void) 1733 { 1734 type_register_static(&virtio_gpu_info); 1735 } 1736 1737 type_init(virtio_register_types) 1738