/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN

    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }

#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

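/*
 * VIRGL() dispatches a command either to the virgl (3d) renderer or to
 * the simple 2d implementation, depending on whether the guest
 * negotiated VIRTIO_GPU_F_VIRGL.  Without CONFIG_VIRGL it always
 * expands to the simple variant.
 */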
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        if (_g->use_virgl_renderer) {                   \
            _virgl(__VA_ARGS__);                        \
        } else {                                        \
            _simple(__VA_ARGS__);                       \
        }                                               \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

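/*
 * Handle VIRTIO_GPU_CMD_UPDATE_CURSOR and VIRTIO_GPU_CMD_MOVE_CURSOR.
 * A move only updates the pointer position; an update additionally
 * refreshes the 64x64 cursor image from the referenced resource.
 */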
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

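/*
 * Send a response for @cmd back to the guest.  If the request carried
 * VIRTIO_GPU_FLAG_FENCE, the fence id and context id are copied into
 * the response header so the guest can match the fence.
 */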
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

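/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host-side pixman image
 * for the resource, after validating the resource id, the format and
 * the max_hostmem accounting limit.
 */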
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    /* PIXMAN_FORMAT_BPP() is in bits; max_hostmem is a byte limit. */
    res->hostmem = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(pformat), 8)
        * c2d.width * c2d.height;
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    virtio_gpu_resource_destroy(g, res);
}

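/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy pixel data from the guest
 * backing pages (res->iov) into the host pixman image.  When the
 * transfer starts at offset 0 and covers complete image lines, the
 * whole region is copied in one go; otherwise it goes line by line.
 */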
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

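/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a resource (or none) to a scanout.
 * resource_id == 0 disables the scanout, except for scanout 0, which
 * always keeps a console surface.  Otherwise a display surface backed
 * by the resource's pixman data is installed on the console.
 */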
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d\n",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

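/*
 * Map the guest memory entries following a resource_attach_backing
 * request into an iovec array.  Returns 0 on success, -1 on failure;
 * on failure any partial mappings are undone.  The entry count is
 * capped at 16384 to bound guest-controlled allocations.
 */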
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = cpu_physical_memory_map(a, &len, 1);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

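/*
 * Dispatch a single control-queue command (2d mode).  Handlers that do
 * not queue a response themselves get a NODATA reply (OK or the error
 * code they set) once they return.
 */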
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

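/*
 * Drain the queued control commands.  Processing stops early if a
 * command is still waiting (e.g. a virgl transfer in flight); fenced
 * commands that have not finished yet are parked on the fence queue.
 */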
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
#ifdef CONFIG_VIRGL
    .gl_block = virtio_gpu_gl_block,
#endif
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

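/*
 * Device state is streamed manually: the resource list (including
 * pixel data and backing-store addresses) followed by a 0 terminator,
 * then the scanout vmstate.  virtio_gpu_load() below mirrors this
 * layout and rebuilds the guest mappings and display surfaces.
 */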
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);

    return 0;
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        /* bytes, keep in sync with virtio_gpu_resource_create_2d() */
        res->hostmem = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(pformat), 8)
            * res->width * res->height;

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    cpu_physical_memory_unmap(res->iov[i].iov_base,
                                              len, 0, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

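/*
 * realize: validate the configuration, set up the virtio queues and
 * bottom halves, and create one QEMU console per configured output.
 * virgl is only enabled when it was compiled in, the host is
 * little-endian and OpenGL display support is available.
 */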
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

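/*
 * Reset: drop all resources and scanout state so the device comes back
 * in its initial, disabled configuration.
 */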
static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.  Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem,
                     256 * 1024 * 1024),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

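/*
 * The command structs below are part of the virtio-gpu ABI shared with
 * the guest; these size checks catch accidental layout changes at
 * build time.
 */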
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);