/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

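/*
 * Cursor commands arrive on the dedicated cursor queue.
 * VIRTIO_GPU_CMD_MOVE_CURSOR only repositions the pointer, while
 * VIRTIO_GPU_CMD_UPDATE_CURSOR also replaces the cursor image from the
 * referenced resource. A rough sketch of the guest's side of an update,
 * with illustrative values only (resource 5 is assumed to hold a
 * 64x64 ARGB image, matching the cursor size allocated below):
 *
 *   struct virtio_gpu_update_cursor c = {
 *       .hdr.type    = VIRTIO_GPU_CMD_UPDATE_CURSOR,
 *       .pos         = { .scanout_id = 0, .x = 100, .y = 100 },
 *       .resource_id = 5,
 *       .hot_x       = 0,
 *       .hot_y       = 0,
 *   };
 */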
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
        .refresh_rate = b->req_state[scanout].refresh_rate,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

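/*
 * Worked example (illustrative only): a 1024x768 resource in a 32 bpp
 * format such as PIXMAN_x8r8g8b8 yields
 *     stride  = ((1024 * 32 + 0x1f) >> 5) * 4 = 4096 bytes
 *     hostmem = 768 * 4096 = 3 MiB
 * pixman_image_create_bits() performs the same calculation internally,
 * including the overflow check skipped here.
 */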
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /*
     * Copied from pixman/pixman-bits-image.c, skipping the integer
     * overflow check; pixman_image_create_bits() will fail in case it
     * overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

#ifdef WIN32
static void
win32_pixman_image_destroy(pixman_image_t *image, void *data)
{
    HANDLE handle = data;

    qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
}
#endif

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            goto end;
        }
#endif
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              bits, res->hostmem / c2d.height);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image,
                                              win32_pixman_image_destroy,
                                              res->handle);
        }
#endif
    }

#ifdef WIN32
end:
#endif
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

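/*
 * Blob resources are backed directly by guest pages instead of a
 * host-side pixman image: the pages are mapped via
 * virtio_gpu_create_mapping_iov() and, when available, stitched into
 * one linear host mapping with udmabuf (virtio_gpu_init_udmabuf()).
 */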
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

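/*
 * Copy guest data into the host pixman image. Two paths: when the
 * transfer covers full-width rows (r.x == 0 and r.width equals the
 * image width), a single iov_to_buf() moves stride * height bytes at
 * once; otherwise each row is copied separately, since source and
 * destination rows are then not contiguous.
 */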
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}

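/*
 * For regular 2D resources the flush rectangle is intersected with
 * every scanout showing the resource and each overlap is forwarded via
 * dpy_gfx_update(). Blob resources displayed on a GL-capable console
 * are instead refreshed wholesale with dpy_gl_update().
 */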
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}

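/*
 * Common tail of SET_SCANOUT and SET_SCANOUT_BLOB: validate the
 * scanout rectangle against the framebuffer, then (re)create the
 * display surface only if the backing pointer, offset or size actually
 * changed, so repeating an identical SET_SCANOUT does not reallocate
 * the surface.
 */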
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
            }
            return;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle,
                                             fb->offset);
#endif

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

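    /*
     * Bounds check against the blob allocation; e.g. (illustrative
     * numbers) a 64x64 rectangle in a 4 bytes-per-pixel format with
     * stride 256 needs
     *     fbend = offset + 256 * 63 + 4 * 64 = offset + 16384
     * bytes to be available in the blob.
     */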
    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

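/*
 * Translate a guest-provided array of virtio_gpu_mem_entry records
 * into an iovec of host mappings. A single guest entry may end up
 * split over several iov slots when dma_memory_map() returns a shorter
 * mapping than requested, hence the inner do/while loop; the iov (and
 * optional addr) arrays grow in chunks of 16 entries.
 */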
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

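/*
 * Drain the command queue. A command that could not be answered
 * synchronously (the renderer was blocked before its response went
 * out) stays on fenceq and is completed later by
 * virtio_gpu_process_fenceq(); draining also stops early while the
 * renderer is blocked and resumes from the gl_flushed callback.
 */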
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

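/*
 * The device section is an ad-hoc stream: one record per resource
 * (id, geometry, format, iovec layout, then the raw image bytes),
 * terminated by a resource id of 0, followed by the scanout vmstate.
 * virtio_gpu_load() below parses exactly this layout.
 */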
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

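/*
 * Inverse of virtio_gpu_save(): recreate each resource, reload its
 * pixel data, re-map the recorded guest pages and finally rebuild the
 * display surfaces from the scanout vmstate. Any inconsistency
 * (duplicate id, unknown format, failed mapping) aborts with -EINVAL.
 */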
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    void *bits = NULL;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            g_free(res);
            return -EINVAL;
        }
#endif
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              bits, res->hostmem / res->height);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                               DMA_DIRECTION_TO_DEVICE,
                               MEMTXATTRS_UNSPECIFIED);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        /* FIXME: should take scanout.r.{x,y} into account */
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
#endif

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
                                     &qdev->mem_reentrancy_guard);
    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
                                       &qdev->mem_reentrancy_guard);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

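/*
 * Device reset destroys all resources and drops queued as well as
 * in-flight commands without answering them; the guest driver is
 * expected to start over from scratch afterwards.
 */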
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core. Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

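/*
 * "max_hostmem" caps the host memory that guest-created 2D resources
 * may consume (256 MiB by default); "blob" gates the
 * VIRTIO_GPU_F_RESOURCE_BLOB feature. Hypothetical command line for
 * illustration:
 *     -device virtio-gpu-pci,max_hostmem=512M,blob=on
 */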
static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)