/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
        .refresh_rate = b->req_state[scanout].refresh_rate,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail if it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x >= scanout->x && rf.r.y >= scanout->y &&
                rf.r.x + rf.r.width <= scanout->x + scanout->width &&
                rf.r.y + rf.r.height <= scanout->y + scanout->height &&
                console_has_gl(scanout->con)) {
                dpy_gl_update(scanout->con, 0, 0, scanout->width,
                              scanout->height);
            }
        }
        return;
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}

/*
 * Common scanout setup used by both SET_SCANOUT and SET_SCANOUT_BLOB:
 * validate the rectangle against the framebuffer and (re)create the
 * display surface when the backing data or geometry changed.
 */
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
                return;
            }
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

/*
 * Turn the guest-supplied virtio_gpu_mem_entry array into host iovecs.
 * Entries that cannot be mapped in one go are split across several
 * iovec slots.
 */
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

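/*
 * Migration stream layout written by virtio_gpu_save() above and consumed
 * by virtio_gpu_load() below (summary derived from the code, for reference):
 *
 *   for each resource:
 *     be32 resource_id, be32 width, be32 height, be32 format, be32 iov_cnt
 *     iov_cnt x { be64 guest address, be32 iov_len }
 *     raw pixel data (stride * height bytes)
 *   be32 0                          -- end-of-list marker
 *   vmstate_virtio_gpu_scanouts     -- scanout and cursor state
 */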
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                               DMA_DIRECTION_TO_DEVICE,
                               MEMTXATTRS_UNSPECIFIED);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core.  Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)
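
/*
 * Usage sketch (illustrative, not part of the original file): with a PCI
 * transport present in the build, the device registered above is typically
 * instantiated from the command line along the lines of
 *
 *     -device virtio-gpu-pci,max_hostmem=256M,blob=on
 *
 * where "max_hostmem" and "blob" are the properties defined in
 * virtio_gpu_properties[] above; the exact transport device name depends on
 * the machine type and build configuration.
 */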