/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

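/*
 * Handle a cursor-queue request: MOVE_CURSOR only repositions the
 * pointer, while UPDATE_CURSOR additionally (re)loads the cursor image
 * from the referenced resource and redefines it on the console.
 */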
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

#ifdef WIN32
static void
win32_pixman_image_destroy(pixman_image_t *image, void *data)
{
    HANDLE handle = data;

    qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
}
#endif

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            goto end;
        }
#endif
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              bits, res->hostmem / c2d.height);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image,
                                              win32_pixman_image_destroy,
                                              res->handle);
        }
#endif
    }

#ifdef WIN32
end:
#endif
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

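/*
 * Create a blob resource: unlike 2D resources there is no host-side
 * pixman image; the guest-supplied memory entries are mapped directly
 * and then exported via udmabuf.
 */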
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

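/*
 * Copy data from the guest backing store into the host pixman image.
 * Partial-width transfers are copied line by line; a full-width,
 * zero-x transfer is done with a single contiguous copy.
 */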
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}

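/*
 * RESOURCE_FLUSH: for blob resources on a GL-capable console a single
 * dpy_gl_update covers the whole scanout; otherwise the flush rectangle
 * is intersected with every scanout showing the resource and the
 * resulting region is passed to dpy_gfx_update.
 */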
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}

static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
            }
            return;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle,
                                             fb->offset);
#endif

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

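/*
 * SET_SCANOUT: route a 2D resource to a display output, deriving the
 * framebuffer geometry from the resource's pixman image.
 */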
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

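/*
 * Translate the guest's list of memory entries into host iovecs.
 * dma_memory_map() may shorten a mapping, so a single entry can expand
 * into several iovec slots; the arrays grow in chunks of 16.
 */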
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

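/*
 * Decode and dispatch one control-queue command; blob commands are
 * rejected unless the "blob" feature is enabled.
 */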
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

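/*
 * Drain the queued control commands. Commands whose response could not
 * be sent yet (e.g. while the renderer is blocked) are parked on fenceq
 * and completed later from the gl_flushed handler.
 */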
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

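/*
 * Migration: resources are streamed out one by one, terminated by a
 * zero resource_id, followed by the scanout state; on load the pixman
 * images and guest DMA mappings are recreated from that stream.
 */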
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    void *bits = NULL;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            g_free(res);
            return -EINVAL;
        }
#endif
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              bits, res->hostmem / res->height);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                               DMA_DIRECTION_TO_DEVICE,
                               MEMTXATTRS_UNSPECIFIED);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        /* FIXME: should take scanout.r.{x,y} into account */
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
#endif

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

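/*
 * Realize: blob support requires udmabuf on the host and is (for now)
 * mutually exclusive with virgl for this device.
 */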
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
                                     &qdev->mem_reentrancy_guard);
    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
                                       &qdev->mem_reentrancy_guard);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)