/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    if (cursor->hdr.type != VIRTIO_GPU_CMD_MOVE_CURSOR) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            update_cursor_data_simple(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
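
/*
 * Virtio config space accessors.  The guest reads pending events via
 * events_read; writing a mask to events_clear acknowledges them.  The
 * other config fields are read-only from the guest's point of view,
 * so set_config only honours events_clear.
 */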
static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features)
{
    return features;
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
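
/*
 * Map a virtio-gpu format enum onto the matching pixman format.
 * The virtio formats name bytes in memory order, while pixman formats
 * name channels in host-endian packed order, hence the two mapping
 * tables.  Returns 0 (no such pixman format) for anything the host
 * cannot handle.
 */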
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res); /* don't leak the resource on the error path */
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
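
/*
 * Copy data from the guest-supplied backing store (res->iov) into the
 * host-side pixman image.  When the transfer covers the full image
 * width at offset zero, the whole buffer is copied in one go;
 * otherwise the target rectangle is copied line by line.
 */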
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
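
/*
 * Flush a rectangle of a resource to every scanout that currently
 * displays it: intersect the flush rectangle with each scanout's
 * rectangle, translate the result into scanout coordinates, and hand
 * only that area to the console layer.
 */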
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    g->enable = 1;
    if (ss.resource_id == 0) {
        /* validate the scanout id before indexing into g->scanout[];
         * scanout 0 must stay enabled */
        if (ss.scanout_id == 0 ||
            ss.scanout_id >= g->conf.max_outputs) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d\n",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT ||
        ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_from
            (ss.r.width, ss.r.height, format,
             pixman_image_get_stride(res->image),
             (uint8_t *)pixman_image_get_data(res->image) + offset);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}
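
/*
 * Translate the guest's scatter list of backing pages into a host
 * iovec, mapping each guest-physical region for writing.  On failure
 * every mapping made so far is torn down again.  The 16384-entry cap
 * bounds how much memory a misbehaving guest can make us allocate.
 */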
int
virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            g_free(*iov);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    g_free(res->iov);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}
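
/*
 * Decode and dispatch one control command.  VIRTIO_GPU_FILL_CMD()
 * (from virtio-gpu.h) copies the request out of the virtqueue element
 * and bails out on a short read.  Handlers flag failure in cmd->error;
 * any command not explicitly completed by its handler is answered
 * here with a NODATA response carrying either OK or that error.
 */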
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = g_new(struct virtio_gpu_ctrl_command, 1);
    while (virtqueue_pop(vq, &cmd->elem)) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        g->stats.requests++;

        virtio_gpu_simple_process_cmd(g, cmd);
        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->stats.inflight++;
            if (g->stats.max_inflight < g->stats.inflight) {
                g->stats.max_inflight = g->stats.inflight;
            }
            fprintf(stderr, "inflight: %3d (+)\r", g->stats.inflight);
            cmd = g_new(struct virtio_gpu_ctrl_command, 1);
        }
    }
    g_free(cmd);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    while (virtqueue_pop(vq, &elem)) {
        s = iov_to_buf(elem.out_sg, elem.out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, &elem, 0);
        virtio_notify(vdev, vq);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}
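
/*
 * UI callback: the display frontend reports the geometry it wants for
 * output idx.  Store it in req_state and raise a DISPLAY event so the
 * guest re-issues VIRTIO_GPU_CMD_GET_DISPLAY_INFO.
 */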
static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
};

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    int i;

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
    g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;
}

static Property virtio_gpu_properties[] = {
    DEFINE_VIRTIO_GPU_PROPERTIES(VirtIOGPU, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)
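
/*
 * These structs are part of the guest/host ABI; catch any accidental
 * layout change at compile time.
 */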
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);