/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
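/*
 * Note: the host-side cursor allocated above is a fixed 64x64 ARGB
 * image, so virtio_gpu_update_cursor_data() only copies pixel data when
 * the backing resource matches those dimensions exactly; a MOVE_CURSOR
 * request just repositions the pointer without touching pixel data.
 */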
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}
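/*
 * Worked example of the estimate above (assuming a 32 bpp format such
 * as VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM): for a 1024x768 resource,
 * stride = ((1024 * 32 + 0x1f) >> 5) * 4 = 4096 bytes, so
 * hostmem = 768 * 4096 = 3 MiB.  This amount is charged against the
 * "max_hostmem" property (256 MiB by default) in
 * virtio_gpu_resource_create_2d() below, before the pixman allocation
 * is attempted.
 */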
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}
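/*
 * Destroying a resource unwinds everything that create/attach set up:
 * scanouts still showing it are disabled, guest backing pages are
 * unmapped, and the pixman image is released, keeping the hostmem
 * accounting in balance with virtio_gpu_resource_create_2d().
 */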
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
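/*
 * TRANSFER_TO_HOST_2D above takes one of two copy paths: a transfer with
 * no offset, origin (0,0) and full image width is satisfied by a single
 * iov_to_buf() over the whole image, while any other rectangle is copied
 * one scanline at a time.  Either way the guest layout is assumed to use
 * the host image's stride.
 */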
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}
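/*
 * RESOURCE_FLUSH clips the dirty rectangle against every scanout that
 * currently displays the resource and translates the intersection into
 * scanout-local coordinates, so a resource shared by several outputs
 * only repaints the regions each console can actually see.
 */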
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->parent_obj.enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width < 16 ||
        ss.r.height < 16 ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->parent_obj.scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}
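/*
 * The display surface created above does not copy pixels: "rect" is a
 * pixman image aliasing the resource's own bits at the requested
 * rectangle, and the destroy function keeps an extra reference on
 * res->image for as long as the surface lives.  Later flushes therefore
 * become visible without any further copying.
 */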
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < ab->nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        /*
         * dma_memory_map() may return a shorter mapping than requested;
         * the do/while loop splits one guest entry across several iov
         * slots, growing the arrays in chunks of 16 entries.
         */
        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                 a, &len, DMA_DIRECTION_TO_DEVICE);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory"
                              " for resource %d element %d\n",
                              __func__, ab->resource_id, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_realloc(*iov, sizeof(struct iovec) * (v + 16));
                if (addr) {
                    *addr = g_realloc(*addr, sizeof(uint64_t) * (v + 16));
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs,
                                        &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}
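/*
 * Every control command receives exactly one reply.  Commands that did
 * not already send a payload response are acknowledged by the fallback
 * above with either the error recorded in cmd->error or
 * VIRTIO_GPU_RESP_OK_NODATA, and virtio_gpu_ctrl_response() echoes the
 * FENCE flag and fence_id from the request header back to the guest.
 */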
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
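/*
 * The per-scanout state above is what a migration target needs to
 * rebuild its displays: the scanned-out resource id, the viewport
 * geometry, and the last cursor update so virtio_gpu_load() can restore
 * the pointer.
 */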
static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
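/*
 * The device section written above is hand-rolled rather than
 * vmstate-described: each live resource is serialized as (id, geometry,
 * format, backing-entry table, raw pixel data), the list is terminated
 * by a resource id of 0, and the scanout vmstate follows.
 * virtio_gpu_load() below must consume exactly this layout.
 */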
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
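/*
 * Loading recreates the pre-migration guest setup: backing entries are
 * re-mapped through the device's DMA address space (any failed or
 * truncated mapping aborts the load), and every enabled scanout gets a
 * fresh display surface.  Note that, unlike virtio_gpu_set_scanout(),
 * the surface created here spans the whole resource image rather than
 * the scanout sub-rectangle.
 */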
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)