/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "qemu/log.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

#ifdef CONFIG_VIRGL
#include "virglrenderer.h"
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
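/*
 * Virtio config space: the guest reads pending events from events_read
 * and acknowledges them by writing the same bits back via events_clear.
 */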
static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
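/*
 * Map a virtio-gpu format (defined in terms of byte order) to the
 * equivalent native-endian pixman format.  Returns 0 for formats the
 * host can't handle.
 */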
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
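/* Release the host-side image and any guest backing pages still mapped. */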
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
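/*
 * RESOURCE_FLUSH: push a dirty rectangle out to every scanout the
 * resource is bound to, clipped to that scanout's viewport.
 */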
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}
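/*
 * SET_SCANOUT binds a resource to a display (resource_id 0 disables the
 * scanout); the rectangle selects the visible part of the image.
 */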
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    g->enable = 1;
    if (ss.resource_id == 0) {
        if (ss.scanout_id == 0 ||
            ss.scanout_id >= g->conf.max_outputs) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT ||
        ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}
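/* Dispatch a single control command in 2D (non-virgl) mode. */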
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}
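/*
 * Cursor queue: each element carries one complete update_cursor request
 * and needs no response payload, so requests are handled inline.
 */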
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    g->renderer_blocked = block;
    if (!block) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_unmigratable = {
    .name = "virtio-gpu",
    .unmigratable = 1,
};

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

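    /* one QemuConsole per configured output; secondary consoles start
     * without an active surface */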
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)
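/*
 * The command structs below are guest ABI: their sizes must match the
 * virtio-gpu spec exactly, so catch any drift at build time.
 */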
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);