/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->parent_obj.use_virgl_renderer) {            \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ?
"move" : "update", 122 cursor->resource_id); 123 124 if (!move) { 125 if (!s->current_cursor) { 126 s->current_cursor = cursor_alloc(64, 64); 127 } 128 129 s->current_cursor->hot_x = cursor->hot_x; 130 s->current_cursor->hot_y = cursor->hot_y; 131 132 if (cursor->resource_id > 0) { 133 VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple, 134 g, s, cursor->resource_id); 135 } 136 dpy_cursor_define(s->con, s->current_cursor); 137 138 s->cursor = *cursor; 139 } else { 140 s->cursor.pos.x = cursor->pos.x; 141 s->cursor.pos.y = cursor->pos.y; 142 } 143 dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, 144 cursor->resource_id ? 1 : 0); 145 } 146 147 static struct virtio_gpu_simple_resource * 148 virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id) 149 { 150 struct virtio_gpu_simple_resource *res; 151 152 QTAILQ_FOREACH(res, &g->reslist, next) { 153 if (res->resource_id == resource_id) { 154 return res; 155 } 156 } 157 return NULL; 158 } 159 160 void virtio_gpu_ctrl_response(VirtIOGPU *g, 161 struct virtio_gpu_ctrl_command *cmd, 162 struct virtio_gpu_ctrl_hdr *resp, 163 size_t resp_len) 164 { 165 size_t s; 166 167 if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) { 168 resp->flags |= VIRTIO_GPU_FLAG_FENCE; 169 resp->fence_id = cmd->cmd_hdr.fence_id; 170 resp->ctx_id = cmd->cmd_hdr.ctx_id; 171 } 172 virtio_gpu_ctrl_hdr_bswap(resp); 173 s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len); 174 if (s != resp_len) { 175 qemu_log_mask(LOG_GUEST_ERROR, 176 "%s: response size incorrect %zu vs %zu\n", 177 __func__, s, resp_len); 178 } 179 virtqueue_push(cmd->vq, &cmd->elem, s); 180 virtio_notify(VIRTIO_DEVICE(g), cmd->vq); 181 cmd->finished = true; 182 } 183 184 void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g, 185 struct virtio_gpu_ctrl_command *cmd, 186 enum virtio_gpu_ctrl_type type) 187 { 188 struct virtio_gpu_ctrl_hdr resp; 189 190 memset(&resp, 0, sizeof(resp)); 191 resp.type = type; 192 virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp)); 193 } 194 195 void virtio_gpu_get_display_info(VirtIOGPU *g, 196 struct virtio_gpu_ctrl_command *cmd) 197 { 198 struct virtio_gpu_resp_display_info display_info; 199 200 trace_virtio_gpu_cmd_get_display_info(); 201 memset(&display_info, 0, sizeof(display_info)); 202 display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; 203 virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info); 204 virtio_gpu_ctrl_response(g, cmd, &display_info.hdr, 205 sizeof(display_info)); 206 } 207 208 static void 209 virtio_gpu_generate_edid(VirtIOGPU *g, int scanout, 210 struct virtio_gpu_resp_edid *edid) 211 { 212 VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); 213 qemu_edid_info info = { 214 .prefx = b->req_state[scanout].width, 215 .prefy = b->req_state[scanout].height, 216 }; 217 218 edid->size = cpu_to_le32(sizeof(edid->edid)); 219 qemu_edid_generate(edid->edid, sizeof(edid->edid), &info); 220 } 221 222 void virtio_gpu_get_edid(VirtIOGPU *g, 223 struct virtio_gpu_ctrl_command *cmd) 224 { 225 struct virtio_gpu_resp_edid edid; 226 struct virtio_gpu_cmd_get_edid get_edid; 227 VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); 228 229 VIRTIO_GPU_FILL_CMD(get_edid); 230 virtio_gpu_bswap_32(&get_edid, sizeof(get_edid)); 231 232 if (get_edid.scanout >= b->conf.max_outputs) { 233 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; 234 return; 235 } 236 237 trace_virtio_gpu_cmd_get_edid(get_edid.scanout); 238 memset(&edid, 0, sizeof(edid)); 239 edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID; 240 virtio_gpu_generate_edid(g, get_edid.scanout, &edid); 241 
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail if it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
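    /* unlink from the resource list and return the accounted host memory */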
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x +
        rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->parent_obj.enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width < 16 ||
        ss.r.height < 16 ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->parent_obj.scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t
             *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ?
                                        cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_gl_unblock(VirtIOGPUBase *b)
{
    VirtIOGPU *g = VIRTIO_GPU(b);

#ifdef CONFIG_VIRGL
    if (g->renderer_reset) {
        g->renderer_reset = false;
        virtio_gpu_virgl_reset(g);
    }
#endif
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription
vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
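        /* the raw pixel contents follow the per-entry address table */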
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;

#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    } else {
#if defined(CONFIG_VIRGL)
        VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
            virtio_gpu_virgl_get_num_capsets(g);
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
    }
#endif

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
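        /* fenced commands are dropped without a response across a reset */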
        g_free(cmd);
    }

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        if (g->parent_obj.renderer_blocked) {
            g->renderer_reset = true;
        } else {
            virtio_gpu_virgl_reset(g);
        }
        g->parent_obj.use_virgl_renderer = false;
    }
#endif

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_unblock = virtio_gpu_gl_unblock;
    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)