/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
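
/*
 * Resource lookup helpers: the resource list is a plain tail queue, so a
 * lookup is a linear scan; resource ids are guest-chosen and are only
 * checked for uniqueness when the resource is created.
 */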
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
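
/*
 * Informative example for the stride computation below: a 1024x768
 * resource with a 32 bpp format gives
 *   stride = ((1024 * 32 + 0x1f) >> 5) * 4 = 4096 bytes,
 * so hostmem = 768 * 4096 bytes = 3 MiB.
 */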
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, cblob.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        g_free(res);
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
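
/*
 * Disabling a scanout hands a NULL surface to the console (letting the
 * display core fall back to its placeholder) and drops the resource's
 * back-reference, so a later unref won't touch a stale scanout.
 */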
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
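
/*
 * RESOURCE_FLUSH: blob resources shown on a GL-capable console are flushed
 * with a single whole-scanout dpy_gl_update(); otherwise the flush
 * rectangle is intersected with each scanout showing the resource and the
 * damaged extents are forwarded via dpy_gfx_update().
 */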
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                console_has_gl(scanout->con)) {
                dpy_gl_update(scanout->con, 0, 0, scanout->width,
                              scanout->height);
                return;
            }
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}
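
/*
 * Common scanout setup. The display surface shares the resource's pixels
 * rather than copying them: a pixman image is wrapped around the data at
 * fb->offset with the framebuffer stride, and it takes a reference on
 * res->image so the backing store outlives the surface.
 */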
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    if (scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, scanout_id);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
                return;
            }
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
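
/*
 * SET_SCANOUT_BLOB maps the scanout rectangle onto a byte range of the
 * blob: offset = offsets[0] + r.x * bytes_pp + r.y * stride. The end of
 * the visible range is bounds-checked against blob_size before reusing
 * the common path above.
 */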
static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                 a, &len, DMA_DIRECTION_TO_DEVICE);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_realloc(*iov, sizeof(struct iovec) * (v + 16));
                if (addr) {
                    *addr = g_realloc(*addr, sizeof(uint64_t) * (v + 16));
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}
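
/*
 * Counterpart to virtio_gpu_create_mapping_iov(): every chunk mapped with
 * dma_memory_map() is released with dma_memory_unmap(), reporting the
 * full chunk length as accessed, then the iovec array itself is freed.
 */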
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}
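
/*
 * Queue notifications are deferred: the notify callbacks below only
 * schedule a bottom half, and the real work happens later in
 * virtio_gpu_ctrl_bh() and virtio_gpu_cursor_bh() from the main loop.
 */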
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
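
/*
 * Migration: per-scanout geometry and cursor state are covered by the
 * vmstate descriptions below; resource pixel data and backing entries
 * are streamed by hand in virtio_gpu_save() and virtio_gpu_load().
 */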
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
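
/*
 * Load side of the manual stream written by virtio_gpu_save(): resources
 * are re-created (id, geometry, format, backing entries, raw pixel data)
 * and their guest-memory mappings re-established via dma_memory_map();
 * a short or failed mapping aborts the load with -EINVAL.
 */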
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}
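
/*
 * Virtio config space: reads return the mirrored virtio_gpu_config; the
 * only guest-writable field is events_clear, which acknowledges (clears)
 * the corresponding bits in events_read.
 */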
static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)