/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    if (cursor->hdr.type != VIRTIO_GPU_CMD_MOVE_CURSOR) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            update_cursor_data_simple(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
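
/*
 * Config space: the device reports pending events in events_read (set
 * in virtio_gpu_notify_event below); the guest acknowledges them by
 * writing the same bits to events_clear, which virtio_gpu_set_config
 * applies as a write-one-to-clear mask.
 */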

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    return features;
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
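
/*
 * virtio-gpu format names list the pixel's bytes in memory order,
 * whereas pixman format names describe a 32-bit value starting at the
 * most significant bit.  The two notations coincide on big-endian
 * hosts; on little-endian hosts the component order reverses, hence
 * the two halves of the switch below.
 */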

static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res); /* don't leak the resource on the error path */
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
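
/*
 * TRANSFER_TO_HOST_2D: copy pixel data from the guest backing store
 * (res->iov, attached via RESOURCE_ATTACH_BACKING) into the host-side
 * pixman image.  A transfer of full-width lines at offset zero is done
 * as one big copy; anything else is copied line by line, with the
 * guest data assumed to use the same stride as the host image.
 */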

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
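
/*
 * RESOURCE_FLUSH: push a dirty rectangle out to every scanout that is
 * showing the resource.  The rectangle is given in resource
 * coordinates, so it is intersected with each scanout's viewport and
 * translated into scanout coordinates before dpy_gfx_update() is
 * called on that scanout's console.
 */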

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    g->enable = 1;
    if (ss.resource_id == 0) {
        /* Validate the guest-supplied id before indexing g->scanout[];
         * the primary scanout (id 0) is never disabled here. */
        if (ss.scanout_id == 0 ||
            ss.scanout_id >= g->conf.max_outputs) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT ||
        ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_from
            (ss.r.width, ss.r.height, format,
             pixman_image_get_stride(res->image),
             (uint8_t *)pixman_image_get_data(res->image) + offset);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}
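
/*
 * Build a scatter/gather list for a resource's guest backing store.
 * The attach-backing command is followed in the request buffer by
 * nr_entries virtio_gpu_mem_entry structs; each entry is mapped into
 * host address space with cpu_physical_memory_map() and recorded as
 * one iovec.  The 16384-entry cap bounds how much memory a single
 * guest command can make us allocate and map.
 */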

int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            g_free(*iov);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    g_free(res->iov);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}
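
/*
 * Dispatch for the 2D command set.  Handlers record failures in
 * cmd->error; any command that did not already queue a response
 * (cmd->finished) is answered with the recorded error code or with a
 * plain OK_NODATA.
 */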

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = g_new(struct virtio_gpu_ctrl_command, 1);
    while (virtqueue_pop(vq, &cmd->elem)) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        g->stats.requests++;

        virtio_gpu_simple_process_cmd(g, cmd);
        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->stats.inflight++;
            if (g->stats.max_inflight < g->stats.inflight) {
                g->stats.max_inflight = g->stats.inflight;
            }
            fprintf(stderr, "inflight: %3d (+)\r", g->stats.inflight);
            cmd = g_new(struct virtio_gpu_ctrl_command, 1);
        }
    }
    g_free(cmd);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    while (virtqueue_pop(vq, &elem)) {
        s = iov_to_buf(elem.out_sg, elem.out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, &elem, 0);
        virtio_notify(vdev, vq);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    /* idx indexes req_state[], so idx == max_outputs is out of range too */
    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}
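
/*
 * virtio_gpu_ops wires the console layer into the device.  UI resize
 * requests arrive via virtio_gpu_ui_info above, which records the
 * requested geometry in req_state[] and raises a display event; the
 * guest driver is then expected to fetch the new modes with
 * GET_DISPLAY_INFO (see virtio_gpu_fill_display_info).
 */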

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
};

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    int i;

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
    g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;
}

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)
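
/*
 * The sizes checked below are part of the guest-visible virtio-gpu
 * ABI; a change in any of these structs would break guest drivers, so
 * catch it at build time.
 */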

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);