/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

#ifdef CONFIG_VIRGL
#include "virglrenderer.h"
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        _simple(__VA_ARGS__);                               \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data); /* don't leak renderer-owned data on the mismatch path */
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
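/*
 * Illustrative note: with CONFIG_VIRGL defined, the VIRGL() invocation in
 * update_cursor() above expands to roughly
 *
 *     if (g->use_virgl_renderer) {
 *         update_cursor_data_virgl(g, s, cursor->resource_id);
 *     } else {
 *         update_cursor_data_simple(g, s, cursor->resource_id);
 *     }
 *
 * so each command handler picks the 3D or 2D backend at runtime without
 * scattering #ifdef CONFIG_VIRGL through every call site.
 */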
static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
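/*
 * Map a virtio-gpu format enum to a pixman format.  The virtio-gpu names
 * describe byte order while pixman codes describe host-endian word order,
 * so on little-endian hosts each format maps to its byte-swapped pixman
 * counterpart (e.g. B8G8R8X8 -> PIXMAN_x8r8g8b8).  Unknown formats map to
 * 0, which callers treat as an error.
 */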
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res); /* don't leak the not-yet-listed resource */
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res); /* unmap any backing still attached */
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}
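/*
 * Typical guest-driven lifecycle of a 2D resource (sketch):
 *
 *   RESOURCE_CREATE_2D -> RESOURCE_ATTACH_BACKING -> SET_SCANOUT
 *     -> (TRANSFER_TO_HOST_2D, RESOURCE_FLUSH)*
 *     -> RESOURCE_DETACH_BACKING -> RESOURCE_UNREF
 *
 * RESOURCE_UNREF is also legal without a prior detach, which is why
 * virtio_gpu_resource_destroy() above cleans up any remaining mapping.
 */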
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
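/*
 * Worked example for the strided copy above (illustrative numbers): a
 * 64x64 XRGB resource has bpp = 4 and stride = 256; transferring the rect
 * (x=8, y=2, w=16, h=4) copies, for each of the 4 rows, 16*4 = 64 bytes
 * from guest offset t2d.offset + h*256 into the image at (2+h)*256 + 8*4.
 * Note the guest data is assumed to use the host stride; a full-width
 * transfer at offset 0 takes the single-copy fast path instead.
 */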
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}
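/*
 * Flush example (illustrative): flushing (0,0)+1024x768 of a resource
 * shown on a scanout at (x=512, y=0, 512x768) intersects to the box
 * (512,0)-(1024,768); the translation above turns that into the
 * console-local update rectangle (0,0)+512x768.
 */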
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    /* validate the scanout id before any g->scanout[] access */
    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT ||
        ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        /* disable the scanout; scanout 0 must stay enabled */
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_from
            (ss.r.width, ss.r.height, format,
             pixman_image_get_stride(res->image),
             (uint8_t *)pixman_image_get_data(res->image) + offset);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}
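/*
 * A RESOURCE_ATTACH_BACKING request is followed in the same descriptor
 * chain by nr_entries virtio_gpu_mem_entry structs, each naming a
 * guest-physical (addr, length) chunk of the backing store; the iovec
 * array that virtio_gpu_create_mapping_iov() builds from them is what
 * the transfer path reads from.
 */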
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}
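/*
 * Control-queue flow (sketch): the virtqueue notify callbacks below only
 * schedule a bottom half; virtio_gpu_handle_ctrl() then drains the vring
 * into g->cmdq and virtio_gpu_process_cmdq() runs the commands in order.
 * A command whose response is deferred (cmd->finished still false, e.g. a
 * fenced 3D command) is parked on g->fenceq, while cmd->waiting stalls
 * the whole queue to preserve command ordering.
 */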
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    /* '>' here would allow an out-of-bounds req_state[] write */
    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    g->renderer_blocked = block;
    if (!block) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};
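/*
 * Typical usage (illustrative): the device is normally instantiated via a
 * bus-specific wrapper, e.g. "-device virtio-gpu-pci,max_outputs=2".  With
 * CONFIG_VIRGL built in and an OpenGL-capable display active, the guest
 * can negotiate VIRTIO_GPU_F_VIRGL and the realize code below sizes the
 * control queue accordingly.
 */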
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    /* reject configs that would overflow the scanout arrays below */
    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUT) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUT);
        return;
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)
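/*
 * Compile-time ABI checks: the sizes below are fixed by the virtio-gpu
 * specification, so any accidental padding or field change in the wire
 * structures fails the build instead of silently breaking guests.
 */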
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);