/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN

    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }

#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif
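
/*
 * Dispatch helper: e.g. VIRGL(g, foo_virgl, foo_simple, g, cmd) calls
 * foo_virgl(g, cmd) when the virgl renderer is in use and
 * foo_simple(g, cmd) otherwise; without CONFIG_VIRGL it always calls
 * foo_simple(g, cmd).  (foo_virgl/foo_simple are illustrative names.)
 */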
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif
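
/*
 * Handle a cursor-queue request.  VIRTIO_GPU_CMD_MOVE_CURSOR only
 * repositions the pointer; VIRTIO_GPU_CMD_UPDATE_CURSOR also redefines
 * the cursor image, pulling pixel data from the named resource when
 * resource_id is non-zero, before publishing it via dpy_cursor_define().
 */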
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
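
/*
 * Map a virtio-gpu 2D format to a pixman format.  virtio-gpu names the
 * channels in memory byte order, hence the PIXMAN_BE_* aliases, which
 * denote the same byte layout regardless of host endianness.
 */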
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = PIXMAN_FORMAT_BPP(pformat) * c2d.width * c2d.height;
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
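
/*
 * Copy data from the guest's backing pages into the host pixman image.
 * A transfer starting at offset 0 and covering the full image width is
 * done with a single iov_to_buf() call; anything else is copied one
 * scanline at a time.  Note the per-line path reads the guest data
 * using the host image's stride.
 */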
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
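
/*
 * Propagate a flush to every scanout the resource is bound to: the
 * flush rectangle is intersected with each scanout's viewport,
 * translated into scanout-local coordinates, and the extents of the
 * result are pushed to that console via dpy_gfx_update().
 */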
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}
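
/*
 * Build an iovec over the guest-physical scatter list of a backing
 * store.  Every entry must map completely via cpu_physical_memory_map()
 * (len coming back unchanged); on any partial mapping the entries
 * mapped so far are released and the call fails.
 */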
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = cpu_physical_memory_map(a, &len, 1);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}
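
/*
 * Decode and run one control request (2d mode).  Handlers report
 * problems through cmd->error; any command that has not produced a
 * response of its own (cmd->finished) is answered afterwards with a
 * nodata response carrying either the error or OK_NODATA.
 */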
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
#ifdef CONFIG_VIRGL
    .gl_block = virtio_gpu_gl_block,
#endif
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
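
/*
 * Migration stream layout used by virtio_gpu_save()/virtio_gpu_load():
 * for each resource its id, geometry, format, backing entries
 * (addr + len) and the raw image bytes, terminated by a resource id of
 * zero, followed by the scanout vmstate defined above.  The load side
 * mirrors this and re-maps the guest backing pages.
 */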
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = PIXMAN_FORMAT_BPP(pformat) * res->width * res->height;

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    cpu_physical_memory_unmap(res->iov[i].iov_base,
                                              len, 0, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.  Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem,
                     256 * 1024 * 1024),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);