/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN

    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }

#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif
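
/*
 * Cursor handling: fetch the cursor image either from a guest 2D resource
 * or from virglrenderer (3D mode) and push shape/position updates to the
 * QEMU console.
 */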
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
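
/*
 * Virtio config space: expose the device config to the guest and let it
 * acknowledge pending events by writing events_clear.
 */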
static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
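
/*
 * Map a VIRTIO_GPU_FORMAT_* value to the matching pixman format;
 * returns 0 for formats the host cannot handle.
 */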
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skipping the integer overflow
     * check; pixman_image_create_bits will fail if the size overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}
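
/* RESOURCE_UNREF: drop a resource, releasing its image and guest mapping. */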
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
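
/*
 * RESOURCE_FLUSH: for every scanout showing the resource, intersect the
 * flushed rectangle with the scanout and update the corresponding console.
 */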
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}
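
/*
 * SET_SCANOUT: bind a resource (or none) to a scanout.  The display surface
 * shares the resource's pixman data, so flushing needs no extra copy.
 */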
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = cpu_physical_memory_map(a, &len, 1);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}
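
/*
 * ATTACH_BACKING: map the guest-provided page list into host iovecs so
 * TRANSFER_TO_HOST_2D can copy from guest memory into the resource.
 */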
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}
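
/*
 * Dispatch a single control command in 2D (non-virgl) mode.  Commands that
 * did not already send a response get a NODATA reply carrying either OK or
 * the recorded error code.
 */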
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}
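
/*
 * UI callback: the console geometry changed.  Record the requested size and
 * raise a display event so the guest re-queries the display info.
 */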
static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
#ifdef CONFIG_VIRGL
    .gl_block = virtio_gpu_gl_block,
#endif
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
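
/*
 * Counterpart of virtio_gpu_save(): recreate each resource, re-map its
 * backing pages and restore the scanout state.
 */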
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    cpu_physical_memory_unmap(res->iov[i].iov_base,
                                              len, 0, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
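
/*
 * Realize: validate the configuration, create the control and cursor
 * virtqueues, the bottom halves and one QEMU console per scanout.
 */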
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        error_setg(errp, "virtio-gpu does not support vIOMMU yet");
        return;
    }

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem,
                     256 * 1024 * 1024),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);