/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (addr) {
            (*addr)[i] = ents[i].addr;
        }
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq,
                            sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        update_cursor(g, &scanout->cursor);
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker);
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);
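
/*
 * Usage sketch (illustrative only, not part of the device model): the
 * properties declared in virtio_gpu_properties above can be set from the
 * command line.  Assuming the virtio-gpu-pci wrapper (virtio-gpu-pci.c) is
 * also built in, an invocation might look like:
 *
 *   qemu-system-x86_64 -device virtio-gpu-pci,max_outputs=2,virgl=on
 *
 * "virgl" and "stats" only exist when QEMU is built with CONFIG_VIRGL, and
 * realize() rejects max_outputs values above VIRTIO_GPU_MAX_SCANOUTS.
 */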