/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
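
/*
 * Cursor handling overview: VIRTIO_GPU_CMD_MOVE_CURSOR only repositions
 * the pointer, while VIRTIO_GPU_CMD_UPDATE_CURSOR (re)defines the cursor
 * image from a guest resource.  The resource must match the 64x64 cursor
 * allocated above; mismatched sizes are silently ignored by
 * virtio_gpu_update_cursor_data().
 */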

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}
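
/*
 * Responses to fenced commands carry the fence back to the guest:
 * virtio_gpu_ctrl_response() copies VIRTIO_GPU_FLAG_FENCE together with
 * fence_id and ctx_id from the request header into the response before
 * byte-swapping it and pushing it onto the virtqueue.
 */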

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /*
     * Copied from pixman/pixman-bits-image.c, skipping the integer
     * overflow check: pixman_image_create_bits() will fail in case it
     * overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
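
/*
 * Host memory accounting example (illustration only): for a 32 bpp
 * format such as PIXMAN_x8r8g8b8 at 1024x768, calc_image_hostmem()
 * yields stride = ((1024 * 32 + 0x1f) >> 5) * 4 = 4096 bytes and
 * hostmem = 768 * 4096 = 3 MiB.  Creation is refused with
 * OUT_OF_MEMORY once the sum of all resources would reach the
 * max_hostmem property.
 */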

static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, cblob.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        g_free(res);
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res); /* not yet on reslist, must be freed on failure */
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
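
/*
 * Destroying a resource tears down everything set up above: any scanout
 * still showing it is disabled first, the pixman image is unreferenced,
 * guest pages are unmapped, and the hostmem accounting is rolled back
 * before the resource is removed from the list.
 */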

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
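
/*
 * Transfer fast path: when the rectangle starts at (0,0) with no byte
 * offset and spans the full image width, the guest data is contiguous
 * and a single bulk iov_to_buf() suffices; otherwise the copy is done
 * line by line with per-row source and destination offsets.
 */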

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    if (scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, scanout_id);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;
    data = (uint8_t *)pixman_image_get_data(res->image);

    /* create a surface for this scanout */
    if (!scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
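
/*
 * Framebuffer geometry: fb.offset = r.x * bytes_pp + r.y * stride points
 * at the top-left pixel of the scanout rectangle inside the backing
 * image, so the display surface created above can share the resource's
 * pixel data without copying.
 */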

int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                 a, &len, DMA_DIRECTION_TO_DEVICE);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_realloc(*iov, sizeof(struct iovec) * (v + 16));
                if (addr) {
                    *addr = g_realloc(*addr, sizeof(uint64_t) * (v + 16));
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
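
/*
 * Backing storage lifecycle: attach maps each guest memory entry with
 * dma_memory_map(), which may map less than requested, hence the inner
 * retry loop growing the iov array in chunks of 16 entries.  Detach or
 * resource teardown balances every mapping via dma_memory_unmap() in
 * virtio_gpu_cleanup_mapping_iov().
 */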

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}
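
/*
 * Control queue flow: the vq callback only schedules a bottom half;
 * virtio_gpu_handle_ctrl() then drains the queue into cmdq, and
 * virtio_gpu_process_cmdq() executes commands in order.  Commands that
 * do not finish synchronously (fenced ones) are parked on fenceq and
 * counted as inflight until their response is sent.
 */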

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
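
/*
 * Migration stream layout written by virtio_gpu_save(): for each
 * resource a be32 resource_id, width, height, format and iov_cnt,
 * followed by a (be64 addr, be32 len) pair per iov entry and the raw
 * image data; a zero resource_id terminates the list, after which the
 * scanout state follows via vmstate_virtio_gpu_scanouts.
 */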

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
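
/*
 * On load the guest mappings cannot be resumed, only recreated: each
 * saved (addr, len) pair is mapped again with dma_memory_map(), and a
 * short or failed mapping aborts the load with -EINVAL since the iov
 * layout would no longer match the saved image data.
 */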

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core. Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};
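
/*
 * Properties: max_hostmem (default 256 MiB) caps the pixman memory
 * accounted in calc_image_hostmem(); the "blob" flag is off by default
 * and gated on udmabuf support at realize time.
 */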

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)