/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "sysemu/cpus.h"
#include "ui/console.h"
#include "ui/rect.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_reset_bh(void *opaque);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

#ifdef WIN32
static void
win32_pixman_image_destroy(pixman_image_t *image, void *data)
{
    HANDLE handle = data;

    qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
}
#endif

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            goto end;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            c2d.width,
            c2d.height,
            bits, c2d.height ? res->hostmem / c2d.height : 0);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
        }
#endif
    }

#ifdef WIN32
end:
#endif
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res,
                                        Error **errp)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    /*
     * virtio_gpu_resource_destroy does not set any errors, so pass a NULL errp
     * to ignore them.
     */
    virtio_gpu_resource_destroy(g, res, NULL);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    QemuRect flush_rect;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        QemuRect rect;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        qemu_rect_init(&rect, scanout->x, scanout->y,
                       scanout->width, scanout->height);

        /* work out the area we need to update for each console */
        if (qemu_rect_intersect(&flush_rect, &rect, &rect)) {
            qemu_rect_translate(&rect, -scanout->x, -scanout->y);
            dpy_gfx_update(g->parent_obj.scanout[i].con,
                           rect.x, rect.y, rect.width, rect.height);
        }
    }
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}

static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
            }
            return;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset);
#endif

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g,
                                            struct virtio_gpu_simple_resource *res)
{
    int i;

    for (i = 0; i < res->iov_cnt; i++) {
        hwaddr len = res->iov[i].iov_len;
        res->iov[i].iov_base =
            dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                           DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);

        if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
            /* Clean up the half-a-mapping we just created... */
            if (res->iov[i].iov_base) {
                dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base,
                                 len, DMA_DIRECTION_TO_DEVICE, 0);
            }
            /* ...and the mappings for previous loop iterations */
            res->iov_cnt = i;
            virtio_gpu_cleanup_mapping(g, res);
            return false;
        }
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
    return true;
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id, pformat;
    void *bits = NULL;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            g_free(res);
            return -EINVAL;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            res->width, res->height,
            bits, res->height ? res->hostmem / res->height : 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }
#ifdef WIN32
        pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
#endif

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            pixman_image_unref(res->image);
            g_free(res);
            return -EINVAL;
        }

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);

    return 0;
}

static int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (!res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->blob_size);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
    }
    qemu_put_be32(f, 0); /* end of list */

    return 0;
}

static int virtio_gpu_blob_load(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->blob_size = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);
        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            g_free(res);
            return -EINVAL;
        }

        virtio_gpu_init_udmabuf(res);

        resource_id = qemu_get_be32(f);
    }

    return 0;
}

static int virtio_gpu_post_load(void *opaque, int version_id)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_simple_resource *res;
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        /* FIXME: should take scanout.r.{x,y} into account */
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
#endif

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
            !virtio_gpu_have_udmabuf()) {
            error_setg(errp, "need rutabaga or udmabuf for blob resources");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
                                     &qdev->mem_reentrancy_guard);
    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
                                       &qdev->mem_reentrancy_guard);
    g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
    qemu_cond_init(&g->reset_cond);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_device_unrealize(DeviceState *qdev)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
    g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
    g_clear_pointer(&g->reset_bh, qemu_bh_delete);
    qemu_cond_destroy(&g->reset_cond);
    virtio_gpu_base_device_unrealize(qdev);
}

static void virtio_gpu_reset_bh(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    struct virtio_gpu_simple_resource *res, *tmp;
    uint32_t resource_id;
    Error *local_err = NULL;
    int i = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        resource_id = res->resource_id;
        vgc->resource_destroy(g, res, &local_err);
        if (local_err) {
            error_append_hint(&local_err, "%s: %s resource_destroy "
                              "for resource_id = %"PRIu32" failed.\n",
                              __func__, object_get_typename(OBJECT(g)),
                              resource_id);
            /* error_report_err frees the error object for us */
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
    }

    g->reset_finished = true;
    qemu_cond_signal(&g->reset_cond);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (qemu_in_vcpu_thread()) {
        g->reset_finished = false;
        qemu_bh_schedule(g->reset_bh);
        while (!g->reset_finished) {
            qemu_cond_wait_bql(&g->reset_cond);
        }
    } else {
        aio_bh_call(g->reset_bh);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

static bool virtio_gpu_blob_state_needed(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);

    return virtio_gpu_blob_enabled(g->parent_obj.conf);
}

const VMStateDescription vmstate_virtio_gpu_blob_state = {
    .name = "virtio-gpu/blob",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .needed = virtio_gpu_blob_state_needed,
    .fields = (const VMStateField[]){
        {
            .name = "virtio-gpu/blob",
            .info = &(const VMStateInfo) {
                .name = "blob",
                .get = virtio_gpu_blob_load,
                .put = virtio_gpu_blob_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_gpu_blob_state,
        NULL
    },
    .post_load = virtio_gpu_post_load,
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgc->resource_destroy = virtio_gpu_resource_destroy;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)