/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

#ifdef CONFIG_VIRGL
#include "virglrenderer.h"
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data); /* don't leak the cursor data on a size mismatch */
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    /* '==' so the trace below labels move vs. update correctly */
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
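/*
 * Virtio config space accessors.  Guest writes are only used to
 * acknowledge events: the events_clear mask is applied to events_read
 * with write-one-to-clear semantics.
 */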
static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
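/*
 * Map a virtio-gpu format to the matching pixman format.  virtio-gpu
 * format names describe byte order in memory, while pixman format
 * names describe component order within a host-endian word, so the
 * mapping differs between little- and big-endian hosts.  Returns 0
 * for unknown formats.
 */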
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res); /* don't leak the resource on an unknown format */
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}
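/*
 * VIRTIO_GPU_CMD_RESOURCE_UNREF.  Note that virtio_gpu_resource_destroy()
 * only releases the pixman image and the list entry; the guest is
 * expected to detach any backing storage before unref'ing a resource.
 */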
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}
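/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a resource rectangle to a display.
 * resource_id == 0 disables the scanout; otherwise a display surface
 * backed by the resource's pixman image is handed to the console.
 */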
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    /* validate the scanout id before it is used to index g->scanout[] */
    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT ||
        ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d\n",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_from
            (ss.r.width, ss.r.height, format,
             pixman_image_get_stride(res->image),
             (uint8_t *)pixman_image_get_data(res->image) + offset);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}
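/*
 * Backing storage for a resource lives in guest memory, described by
 * nr_entries (addr, length) pairs that follow the attach_backing
 * header in the request.  Each entry is mapped and collected into an
 * iovec array so transfers can access the guest pages directly.
 */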
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}
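/*
 * Dispatch a single command in 2d mode.  Handlers that reply
 * themselves set cmd->finished; for everything else a nodata response
 * is sent below, carrying either the recorded error or
 * VIRTIO_GPU_RESP_OK_NODATA.
 */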
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = g_new(struct virtio_gpu_ctrl_command, 1);
    while (virtqueue_pop(vq, &cmd->elem)) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
            cmd = g_new(struct virtio_gpu_ctrl_command, 1);
        }
    }
    g_free(cmd);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    while (virtqueue_pop(vq, &elem)) {
        s = iov_to_buf(elem.out_sg, elem.out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, &elem, 0);
        virtio_notify(vdev, vq);
    }
}
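/*
 * Both virtqueues are serviced from bottom halves: the notify
 * callbacks above merely schedule g->ctrl_bh / g->cursor_bh, keeping
 * virtqueue processing out of the vcpu thread.
 */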
static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    /* '>=': idx == max_outputs would overrun req_state[] */
    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
};

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}
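/*
 * "max_outputs" selects how many scanouts (and thus QEMU consoles) the
 * device exposes; the virgl/stats flags only exist in builds with
 * virglrenderer support.
 */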
static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);