/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/log.h"

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

#ifdef CONFIG_VIRGL
#include "virglrenderer.h"
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        /* size mismatch: drop the cursor data instead of leaking it */
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        /* full cursor update: refresh the cursor image before moving it */
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
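/*
 * Virtio config space.  The only guest-writable field is events_clear;
 * writing a mask there acknowledges (clears) the corresponding bits in
 * events_read.
 */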
static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
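/*
 * Map a virtio-gpu format code to its pixman equivalent.  virtio-gpu
 * formats name the component bytes in memory order, while pixman formats
 * describe a host-endian packed word, so the mapping is byte-swapped on
 * little-endian hosts.
 */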
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    /* release any mapped backing store too, so destroying a resource with
     * backing still attached does not leak the iovec */
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
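/*
 * Copy data from the guest-supplied backing store into the host-side
 * pixman image.  Transfers that start at offset 0 and span full-width
 * rows at the image origin are done with one bulk copy; everything else
 * is copied row by row.
 */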
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
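/*
 * Propagate a flushed rectangle to every scanout showing this resource:
 * intersect the flush rectangle with each scanout's viewport, translate
 * the result into scanout coordinates, and update that console.
 */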
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    g->enable = 1;
    if (ss.resource_id == 0) {
        /* disabling a scanout: validate the guest-supplied id before
         * indexing g->scanout (scanout 0 cannot be disabled) */
        if (ss.scanout_id == 0 ||
            ss.scanout_id >= g->conf.max_outputs) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d\n",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT ||
        ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}
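/*
 * Build an iovec array for the guest backing store of a resource.  Each
 * virtio_gpu_mem_entry names one guest-physical range; every entry must
 * map completely via cpu_physical_memory_map(), otherwise all mappings
 * made so far are rolled back and the call fails.
 */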
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}
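/*
 * Dispatch a single control-queue command in 2D (non-virgl) mode.  Any
 * command that has not queued its own response by the time it returns
 * (cmd->finished still false) gets a NODATA reply carrying either the
 * recorded error or OK.
 */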
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}
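/*
 * The cursor queue is handled synchronously: each element carries one
 * virtio_gpu_update_cursor request, and the buffer is pushed back to the
 * guest immediately (no response data, hence length 0).
 */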
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    /* req_state[] has max_outputs entries, so idx must stay below it */
    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    g->renderer_blocked = block;
    if (!block) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_unmigratable = {
    .name = "virtio-gpu",
    .unmigratable = 1,
};
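/*
 * Realize: size the virtqueues (the control queue is larger when virgl
 * is enabled), create one QemuConsole per configured output, and mark
 * the device unmigratable.
 */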
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)
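/*
 * Compile-time checks that the wire structures match the sizes fixed by
 * the virtio-gpu ABI; a mismatch here would break guest compatibility.
 */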
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);