/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "sysemu/cpus.h"
#include "ui/console.h"
#include "ui/rect.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/memfd.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_reset_bh(void *opaque);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, cursor->resource_id);
}

struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
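
/*
 * Host memory accounting for a 2D resource, matching pixman's layout:
 * rows are padded to a 32-bit multiple. Worked example for
 * PIXMAN_x8r8g8b8 (bpp = 32) at 1024x768:
 *   stride = ((1024 * 32 + 0x1f) >> 5) * 4 = 4096 bytes
 *   hostmem = 768 * 4096 = 3 MiB
 */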
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        if (!qemu_pixman_image_new_shareable(
                &res->image,
                &res->share_handle,
                "virtio-gpu res",
                pformat,
                c2d.width,
                c2d.height,
                c2d.height ? res->hostmem / c2d.height : 0,
                &error_warn)) {
            goto end;
        }
    }

end:
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
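
/*
 * Create a blob resource backed directly by guest pages. Requests that
 * neither use VIRTIO_GPU_BLOB_MEM_GUEST nor ask for
 * VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE are rejected; the mem entries that
 * follow the request are mapped and, where the host supports it, wrapped
 * into a udmabuf.
 */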
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res,
                                        Error **errp)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    /*
     * virtio_gpu_resource_destroy does not set any errors, so pass a NULL errp
     * to ignore them.
     */
    virtio_gpu_resource_destroy(g, res, NULL);
}
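
/*
 * TRANSFER_TO_HOST_2D: copy data from the guest backing pages into the
 * host pixman image. When the rectangle covers full rows it is copied
 * in a single pass; partial rows are copied line by line using the
 * image stride.
 */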
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    QemuRect flush_rect;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        QemuRect rect;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        qemu_rect_init(&rect, scanout->x, scanout->y,
                       scanout->width, scanout->height);

        /* work out the area we need to update for each console */
        if (qemu_rect_intersect(&flush_rect, &rect, &rect)) {
            qemu_rect_translate(&rect, -scanout->x, -scanout->y);
            dpy_gfx_update(g->parent_obj.scanout[i].con,
                           rect.x, rect.y, rect.width, rect.height);
        }
    }
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}
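
/*
 * Make @scanout_id display @res. Each resource tracks the scanouts
 * referencing it in scanout_bitmask, so the resource previously shown
 * on this scanout (if any) is unlinked first.
 */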
void virtio_gpu_update_scanout(VirtIOGPU *g,
                               uint32_t scanout_id,
                               struct virtio_gpu_simple_resource *res,
                               struct virtio_gpu_framebuffer *fb,
                               struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
    scanout->fb = *fb;
}

static bool virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return false;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, fb, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
                return false;
            }
            return true;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        qemu_displaysurface_set_share_handle(scanout->ds, res->share_handle,
                                             fb->offset);

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, fb, r);
    return true;
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
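
/*
 * SET_SCANOUT_BLOB: the framebuffer layout (format, size, stride and
 * plane offset) comes from the request rather than a pixman image. The
 * fbend computation below bounds the last byte the rectangle may touch;
 * e.g. a 64x64 rect at offset 0, stride 256, 4 bytes per pixel needs
 * 256 * 63 + 4 * 64 = 16384 bytes of blob.
 */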
static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}
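
/*
 * ATTACH_BACKING associates guest pages with an already created
 * resource. Only one backing may be attached at a time; attaching to a
 * resource that already has one fails with ERR_UNSPEC.
 */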
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
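
/*
 * Decode and dispatch one control request. Handlers that send their own
 * response mark the command finished; for everything else a NODATA (or
 * error) response is generated at the bottom, unless the renderer is
 * currently blocked.
 */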
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
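
/*
 * Run queued control commands. Processing stops while the renderer is
 * blocked, or when a command suspends without a fence; unfinished
 * fenced commands are parked on fenceq and completed later via
 * virtio_gpu_process_fenceq().
 */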
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        /* command suspended */
        if (!cmd->finished && !(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
            trace_virtio_gpu_cmd_suspended(cmd->cmd_hdr.type);
            break;
        }

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                trace_virtio_gpu_inc_inflight_fences(g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            trace_virtio_gpu_dec_inflight_fences(g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
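
/*
 * Migration state for one scanout. The fb.* fields are only sent when
 * the x-scanout-vmstate-version property is at least 2, keeping the
 * stream compatible with older machine types that predate them.
 */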
static bool scanout_vmstate_after_v2(void *opaque, int version)
{
    struct VirtIOGPUBase *base = container_of(opaque, VirtIOGPUBase, scanout);
    struct VirtIOGPU *gpu = container_of(base, VirtIOGPU, parent_obj);

    return gpu->scanout_vmstate_version >= 2;
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_UINT32_TEST(fb.format, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.bytes_pp, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.width, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.height, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.stride, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.offset, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
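
/*
 * Remap a migrated resource's backing pages on the destination. On
 * failure all mappings created so far, including the partial one, are
 * torn down again, so the caller only needs to free the resource.
 */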
static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g,
                                            struct virtio_gpu_simple_resource *res)
{
    int i;

    for (i = 0; i < res->iov_cnt; i++) {
        hwaddr len = res->iov[i].iov_len;
        res->iov[i].iov_base =
            dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                           DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);

        if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
            /* Clean up the half-a-mapping we just created... */
            if (res->iov[i].iov_base) {
                dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base,
                                 len, DMA_DIRECTION_TO_DEVICE, 0);
            }
            /* ...and the mappings for previous loop iterations */
            res->iov_cnt = i;
            virtio_gpu_cleanup_mapping(g, res);
            return false;
        }
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
    return true;
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
        if (!qemu_pixman_image_new_shareable(&res->image,
                                             &res->share_handle,
                                             "virtio-gpu res",
                                             pformat,
                                             res->width,
                                             res->height,
                                             res->height ?
                                             res->hostmem / res->height : 0,
                                             &error_warn)) {
            g_free(res);
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            pixman_image_unref(res->image);
            g_free(res);
            return -EINVAL;
        }

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);

    return 0;
}

static int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (!res->blob_size) {
            continue;
        }
        assert(!res->image);
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->blob_size);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
    }
    qemu_put_be32(f, 0); /* end of list */

    return 0;
}
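
/*
 * Counterpart to virtio_gpu_blob_save(): the stream carries no pixel
 * data for blob resources, only the guest addresses and lengths needed
 * to remap the backing pages and recreate the udmabuf on the
 * destination.
 */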
static int virtio_gpu_blob_load(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->blob_size = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);
        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            g_free(res);
            return -EINVAL;
        }

        virtio_gpu_init_udmabuf(res);

        resource_id = qemu_get_be32(f);
    }

    return 0;
}

static int virtio_gpu_post_load(void *opaque, int version_id)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_simple_resource *res;
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }

        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }

        if (scanout->fb.format != 0) {
            uint32_t error = 0;
            struct virtio_gpu_rect r = {
                .x = scanout->x,
                .y = scanout->y,
                .width = scanout->width,
                .height = scanout->height
            };

            if (!virtio_gpu_do_set_scanout(g, i, &scanout->fb, res, &r, &error)) {
                return -EINVAL;
            }
        } else {
            /* legacy v1 migration support */
            if (!res->image) {
                return -EINVAL;
            }
            scanout->ds = qemu_create_displaysurface_pixman(res->image);
            qemu_displaysurface_set_share_handle(scanout->ds,
                                                 res->share_handle, 0);
            dpy_gfx_replace_surface(scanout->con, scanout->ds);
        }

        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
            !virtio_gpu_virgl_enabled(g->parent_obj.conf) &&
            !virtio_gpu_have_udmabuf()) {
            error_setg(errp, "need rutabaga or udmabuf for blob resources");
            return;
        }

#ifdef VIRGL_VERSION_MAJOR
#if VIRGL_VERSION_MAJOR < 1
        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "old virglrenderer, blob resources unsupported");
            return;
        }
#endif
#endif
    }

    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
#ifdef VIRGL_VERSION_MAJOR
#if VIRGL_VERSION_MAJOR >= 1
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf) ||
            !virtio_gpu_hostmem_enabled(g->parent_obj.conf)) {
            error_setg(errp, "venus requires enabled blob and hostmem options");
            return;
        }
#else
        error_setg(errp, "old virglrenderer, venus unsupported");
        return;
#endif
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = virtio_bh_new_guarded(qdev, virtio_gpu_ctrl_bh, g);
    g->cursor_bh = virtio_bh_new_guarded(qdev, virtio_gpu_cursor_bh, g);
    g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
    qemu_cond_init(&g->reset_cond);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_device_unrealize(DeviceState *qdev)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
    g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
    g_clear_pointer(&g->reset_bh, qemu_bh_delete);
    qemu_cond_destroy(&g->reset_cond);
    virtio_gpu_base_device_unrealize(qdev);
}
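
/*
 * Destroying resources may require the renderer, so the actual reset
 * work runs in a bottom half. When the reset originates from a vCPU
 * thread, virtio_gpu_reset() schedules the BH and blocks on reset_cond
 * until it has run; otherwise the BH body is invoked directly.
 */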
static void virtio_gpu_reset_bh(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    struct virtio_gpu_simple_resource *res, *tmp;
    uint32_t resource_id;
    Error *local_err = NULL;
    int i = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        resource_id = res->resource_id;
        vgc->resource_destroy(g, res, &local_err);
        if (local_err) {
            error_append_hint(&local_err, "%s: %s resource_destroy"
                              " for resource_id = %" PRIu32 " failed.\n",
                              __func__, object_get_typename(OBJECT(g)),
                              resource_id);
            /* error_report_err frees the error object for us */
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
    }

    g->reset_finished = true;
    qemu_cond_signal(&g->reset_cond);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (qemu_in_vcpu_thread()) {
        g->reset_finished = false;
        qemu_bh_schedule(g->reset_bh);
        while (!g->reset_finished) {
            qemu_cond_wait_bql(&g->reset_cond);
        }
    } else {
        aio_bh_call(g->reset_bh);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

static bool virtio_gpu_blob_state_needed(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);

    return virtio_gpu_blob_enabled(g->parent_obj.conf);
}

const VMStateDescription vmstate_virtio_gpu_blob_state = {
    .name = "virtio-gpu/blob",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .needed = virtio_gpu_blob_state_needed,
    .fields = (const VMStateField[]){
        {
            .name = "virtio-gpu/blob",
            .info = &(const VMStateInfo) {
                .name = "blob",
                .get = virtio_gpu_blob_load,
                .put = virtio_gpu_blob_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core. Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_gpu_blob_state,
        NULL
    },
    .post_load = virtio_gpu_post_load,
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0),
    DEFINE_PROP_UINT8("x-scanout-vmstate-version", VirtIOGPU,
                      scanout_vmstate_version, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgc->resource_destroy = virtio_gpu_resource_destroy;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)