/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
        .refresh_rate = b->req_state[scanout].refresh_rate,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflow.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}

static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
            }
            return;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                               DMA_DIRECTION_TO_DEVICE,
                               MEMTXATTRS_UNSPECIFIED);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        /* FIXME: should take scanout.r.{x,y} into account */
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
                                     &qdev->mem_reentrancy_guard);
    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
                                       &qdev->mem_reentrancy_guard);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to virtio migration
 * scheme as described in doc/virtio-migration.txt, in a sense that no
 * save/load callback are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)