/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN

    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }

#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        if (_g->use_virgl_renderer) {                   \
            _virgl(__VA_ARGS__);                        \
        } else {                                        \
            _simple(__VA_ARGS__);                       \
        }                                               \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
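
/*
 * Config space handling.  The only guest-writable field that matters is
 * events_clear: bits set there acknowledge (and clear) the corresponding
 * bits in events_read, which the device raises via virtio_gpu_notify_event().
 */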
static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
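
/*
 * Map a virtio-gpu resource format to the matching pixman format.  The
 * PIXMAN_BE_* helpers (include/ui/qemu-pixman.h) resolve to a pixman code
 * whose bytes land in memory in the named order on either host endianness.
 * Unknown formats map to 0, which callers treat as "unsupported".
 */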
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
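
/*
 * Detach a scanout from its resource.  The primary head (scanout 0) keeps a
 * placeholder "Guest disabled display." surface; secondary heads simply get
 * their surface dropped.
 */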
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = cpu_physical_memory_map(a, &len, 1);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}
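
/*
 * Queue notifications are handled in bottom halves rather than directly in
 * the virtqueue callbacks; the callbacks below only schedule the BHs, and
 * the real work happens in virtio_gpu_handle_ctrl()/virtio_gpu_handle_cursor().
 */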
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
#ifdef CONFIG_VIRGL
    .gl_block = virtio_gpu_gl_block,
#endif
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    cpu_physical_memory_unmap(res->iov[i].iov_base,
                                              len, 0, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        error_setg(errp, "virtio-gpu does not support vIOMMU yet");
        return;
    }

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }
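
    /* Transport setup: config space size, advertised scanout count, the
     * control and cursor virtqueues, and the bottom halves that service
     * them. */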
    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in docs/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
1322 */ 1323 static const VMStateDescription vmstate_virtio_gpu = { 1324 .name = "virtio-gpu", 1325 .minimum_version_id = VIRTIO_GPU_VM_VERSION, 1326 .version_id = VIRTIO_GPU_VM_VERSION, 1327 .fields = (VMStateField[]) { 1328 VMSTATE_VIRTIO_DEVICE /* core */, 1329 { 1330 .name = "virtio-gpu", 1331 .info = &(const VMStateInfo) { 1332 .name = "virtio-gpu", 1333 .get = virtio_gpu_load, 1334 .put = virtio_gpu_save, 1335 }, 1336 .flags = VMS_SINGLE, 1337 } /* device */, 1338 VMSTATE_END_OF_LIST() 1339 }, 1340 }; 1341 1342 static Property virtio_gpu_properties[] = { 1343 DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1), 1344 DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB), 1345 #ifdef CONFIG_VIRGL 1346 DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags, 1347 VIRTIO_GPU_FLAG_VIRGL_ENABLED, true), 1348 DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags, 1349 VIRTIO_GPU_FLAG_STATS_ENABLED, false), 1350 #endif 1351 DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024), 1352 DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768), 1353 DEFINE_PROP_END_OF_LIST(), 1354 }; 1355 1356 static void virtio_gpu_class_init(ObjectClass *klass, void *data) 1357 { 1358 DeviceClass *dc = DEVICE_CLASS(klass); 1359 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 1360 1361 vdc->realize = virtio_gpu_device_realize; 1362 vdc->unrealize = virtio_gpu_device_unrealize; 1363 vdc->get_config = virtio_gpu_get_config; 1364 vdc->set_config = virtio_gpu_set_config; 1365 vdc->get_features = virtio_gpu_get_features; 1366 vdc->set_features = virtio_gpu_set_features; 1367 1368 vdc->reset = virtio_gpu_reset; 1369 1370 set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); 1371 dc->props = virtio_gpu_properties; 1372 dc->vmsd = &vmstate_virtio_gpu; 1373 dc->hotpluggable = false; 1374 } 1375 1376 static const TypeInfo virtio_gpu_info = { 1377 .name = TYPE_VIRTIO_GPU, 1378 .parent = TYPE_VIRTIO_DEVICE, 1379 .instance_size = sizeof(VirtIOGPU), 1380 .instance_init = virtio_gpu_instance_init, 1381 .class_init = virtio_gpu_class_init, 1382 }; 1383 1384 static void virtio_register_types(void) 1385 { 1386 type_register_static(&virtio_gpu_info); 1387 } 1388 1389 type_init(virtio_register_types) 1390 1391 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24); 1392 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56); 1393 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32); 1394 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40); 1395 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48); 1396 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48); 1397 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56); 1398 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16); 1399 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32); 1400 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32); 1401 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408); 1402 1403 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72); 1404 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72); 1405 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96); 1406 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24); 1407 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32); 1408 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32); 1409 QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32); 1410 
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);