// SPDX-License-Identifier: GPL-2.0+

#include <drm/drm_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "dce_v6_0.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"

/**
 * DOC: amdgpu_vkms
 *
 * The amdgpu vkms interface provides a virtual KMS interface for several use
 * cases: devices without display hardware, platforms where the actual display
 * hardware is not useful (e.g., servers), SR-IOV virtual functions, device
 * emulation/simulation, and device bring up prior to display hardware being
 * usable. We previously emulated a legacy KMS interface, but there was a desire
 * to move to the atomic KMS interface. The vkms driver did everything we
 * needed, but we wanted KMS support natively in the driver without buffer
 * sharing and the ability to support an instance of VKMS per device. We first
 * looked at splitting vkms into a stub driver and a helper module that other
 * drivers could use to implement a virtual display, but this strategy ended up
 * being messy due to driver specific callbacks needed for buffer management.
 * Ultimately, it proved easier to import the vkms code as it mostly used core
 * drm helpers anyway.
 */

static const u32 amdgpu_vkms_formats[] = {
	DRM_FORMAT_XRGB8888,
};

static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
{
	struct amdgpu_vkms_output *output = container_of(timer,
							 struct amdgpu_vkms_output,
							 vblank_hrtimer);
	struct drm_crtc *crtc = &output->crtc;
	u64 ret_overrun;
	bool ret;

	ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
					  output->period_ns);
	WARN_ON(ret_overrun != 1);

	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("amdgpu_vkms failure on handling vblank\n");

	return HRTIMER_RESTART;
}

static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate;
	out->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);

	return 0;
}

static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);

	hrtimer_cancel(&out->vblank_hrtimer);
}

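/*
 * Report the timestamp of the most recent simulated vblank. While vblank
 * interrupts are disabled the current time is good enough; otherwise the
 * timestamp is derived from the hrtimer expiry and corrected back by one
 * frame period, as explained in the comment below.
 */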
static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
					     int *max_error,
					     ktime_t *vblank_time,
					     bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;

	return true;
}

static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = drm_crtc_cleanup,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank = amdgpu_vkms_enable_vblank,
	.disable_vblank = amdgpu_vkms_disable_vblank,
	.get_vblank_timestamp = amdgpu_vkms_get_vblank_timestamp,
};

static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}

static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
}

static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_atomic_state *state)
{
	if (crtc->state->event) {
		spin_lock(&crtc->dev->event_lock);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}

static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
	.atomic_flush = amdgpu_vkms_crtc_atomic_flush,
	.atomic_enable = amdgpu_vkms_crtc_atomic_enable,
	.atomic_disable = amdgpu_vkms_crtc_atomic_disable,
};

static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
				 struct drm_plane *primary, struct drm_plane *cursor)
{
	int ret;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					&amdgpu_vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}

	drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);

	return ret;
}

static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

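/*
 * The virtual connector has no EDID, so advertise a fixed list of common
 * resolutions (all as 60 Hz CVT timings) and mark the default resolution
 * (XRES_DEF x YRES_DEF) as preferred.
 */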
static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode = NULL;
	unsigned i;
	static const struct mode_size {
		int w;
		int h;
	} common_modes[] = {
		{ 640, 480},
		{ 720, 480},
		{ 800, 600},
		{ 848, 480},
		{1024, 768},
		{1152, 768},
		{1280, 720},
		{1280, 800},
		{1280, 854},
		{1280, 960},
		{1280, 1024},
		{1440, 900},
		{1400, 1050},
		{1680, 1050},
		{1600, 1200},
		{1920, 1080},
		{1920, 1200},
		{2560, 1440},
		{4096, 3112},
		{3656, 2664},
		{3840, 2160},
		{4096, 2160},
	};

	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
		drm_mode_probed_add(connector, mode);
	}

	drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);

	return ARRAY_SIZE(common_modes);
}

static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = {
	.get_modes = amdgpu_vkms_conn_get_modes,
};

static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane,
					    struct drm_atomic_state *old_state)
{
	return;
}

static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret != 0)
		return ret;

	/* for now primary plane must be visible and full screen */
	if (!new_plane_state->visible)
		return -EINVAL;

	return 0;
}

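/*
 * Pin the new framebuffer's buffer object in a displayable domain and
 * record its GPU address before the plane update. An extra BO reference is
 * taken here and dropped again in amdgpu_vkms_cleanup_fb().
 */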
static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
				  struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}
	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	return 0;
}

static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = {
	.atomic_update = amdgpu_vkms_plane_atomic_update,
	.atomic_check = amdgpu_vkms_plane_atomic_check,
	.prepare_fb = amdgpu_vkms_prepare_fb,
	.cleanup_fb = amdgpu_vkms_cleanup_fb,
};

static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
						enum drm_plane_type type,
						int index)
{
	struct drm_plane *plane;
	int ret;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &amdgpu_vkms_plane_funcs,
				       amdgpu_vkms_formats,
				       ARRAY_SIZE(amdgpu_vkms_formats),
				       NULL, type, NULL);
	if (ret) {
		kfree(plane);
		return ERR_PTR(ret);
	}

	drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs);

	return plane;
}

int amdgpu_vkms_output_init(struct drm_device *dev,
			    struct amdgpu_vkms_output *output, int index)
{
	struct drm_connector *connector = &output->connector;
	struct drm_encoder *encoder = &output->encoder;
	struct drm_crtc *crtc = &output->crtc;
	struct drm_plane *primary, *cursor = NULL;
	int ret;

	primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor);
	if (ret)
		goto err_crtc;

	ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init connector\n");
		goto err_connector;
	}

	drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs);

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init encoder\n");
		goto err_encoder;
	}
	encoder->possible_crtcs = 1 << index;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("Failed to attach connector to encoder\n");
		goto err_attach;
	}

	drm_mode_config_reset(dev);

	return 0;

err_attach:
	drm_encoder_cleanup(encoder);

err_encoder:
	drm_connector_cleanup(connector);

err_connector:
	drm_crtc_cleanup(crtc);

err_crtc:
	drm_plane_cleanup(primary);

	return ret;
}

const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

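/*
 * IP-block software init: configure the DRM mode setting limits for the
 * virtual display and create one virtual CRTC/encoder/connector per
 * exposed crtc.
 */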
static int amdgpu_vkms_sw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev_to_drm(adev)->max_vblank_count = 0;

	adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;

	adev_to_drm(adev)->mode_config.max_width = XRES_MAX;
	adev_to_drm(adev)->mode_config.max_height = YRES_MAX;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
					   sizeof(struct amdgpu_vkms_output),
					   GFP_KERNEL);
	if (!adev->amdgpu_vkms_output)
		return -ENOMEM;

	/* allocate crtcs, encoders, connectors */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
		if (r)
			return r;
	}

	drm_kms_helper_poll_init(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = true;
	return 0;
}

static int amdgpu_vkms_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i = 0;

	for (i = 0; i < adev->mode_info.num_crtc; i++)
		if (adev->mode_info.crtcs[i])
			hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);

	kfree(adev->mode_info.bios_hardcoded_edid);
	kfree(adev->amdgpu_vkms_output);

	drm_kms_helper_poll_fini(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = false;
	return 0;
}

static int amdgpu_vkms_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		dce_v6_0_disable_dce(adev);
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		dce_v8_0_disable_dce(adev);
		break;
#endif
	case CHIP_FIJI:
	case CHIP_TONGA:
		dce_v10_0_disable_dce(adev);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_VEGAM:
		dce_v11_0_disable_dce(adev);
		break;
	case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
		/* no DCE */
		break;
	default:
		break;
	}
	return 0;
}

static int amdgpu_vkms_hw_fini(void *handle)
{
	return 0;
}

static int amdgpu_vkms_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = drm_mode_config_helper_suspend(adev_to_drm(adev));
	if (r)
		return r;
	return amdgpu_vkms_hw_fini(handle);
}

static int amdgpu_vkms_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vkms_hw_init(handle);
	if (r)
		return r;
	return drm_mode_config_helper_resume(adev_to_drm(adev));
}

static bool amdgpu_vkms_is_idle(void *handle)
{
	return true;
}

static int amdgpu_vkms_wait_for_idle(void *handle)
{
	return 0;
}

static int amdgpu_vkms_soft_reset(void *handle)
{
	return 0;
}

static int amdgpu_vkms_set_clockgating_state(void *handle,
					     enum amd_clockgating_state state)
{
	return 0;
}

static int amdgpu_vkms_set_powergating_state(void *handle,
					     enum amd_powergating_state state)
{
	return 0;
}

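/*
 * IP callbacks for the virtual display block. Most hooks are no-ops since
 * there is no display engine to program; hw_init only makes sure any real
 * DCE hardware present on the ASIC is disabled.
 */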
static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
	.name = "amdgpu_vkms",
	.early_init = NULL,
	.late_init = NULL,
	.sw_init = amdgpu_vkms_sw_init,
	.sw_fini = amdgpu_vkms_sw_fini,
	.hw_init = amdgpu_vkms_hw_init,
	.hw_fini = amdgpu_vkms_hw_fini,
	.suspend = amdgpu_vkms_suspend,
	.resume = amdgpu_vkms_resume,
	.is_idle = amdgpu_vkms_is_idle,
	.wait_for_idle = amdgpu_vkms_wait_for_idle,
	.soft_reset = amdgpu_vkms_soft_reset,
	.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
	.set_powergating_state = amdgpu_vkms_set_powergating_state,
};

const struct amdgpu_ip_block_version amdgpu_vkms_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_vkms_ip_funcs,
};