/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

#include "atom.h"
#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>

#include <linux/gcd.h>

static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);

	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
	for (i = 0; i < 256; i++) {
		WREG32(AVIVO_DC_LUT_30_COLOR,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}

	/* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
	WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
}

static void dce4_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN +
	       radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}
}

static void dce5_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);

	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
	       NI_GRPH_PRESCALE_BYPASS);
	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
	       NI_OVL_PRESCALE_BYPASS);
	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));

	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}

	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
	       (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset?
	 */
	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
	if (ASIC_IS_DCE8(rdev)) {
		/* XXX this only needs to be programmed once per crtc at startup,
		 * not sure where the best place for it is
		 */
		WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
		       CIK_CURSOR_ALPHA_BLND_ENA);
	}
}

static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;
	uint32_t dac2_cntl;

	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
	if (radeon_crtc->crtc_id == 0)
		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
	else
		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
	WREG32(RADEON_DAC_CNTL2, dac2_cntl);

	WREG8(RADEON_PALETTE_INDEX, 0);
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_PALETTE_30_DATA,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}
}

void radeon_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;

	if (!crtc->enabled)
		return;

	if (ASIC_IS_DCE5(rdev))
		dce5_crtc_load_lut(crtc);
	else if (ASIC_IS_DCE4(rdev))
		dce4_crtc_load_lut(crtc);
	else if (ASIC_IS_AVIVO(rdev))
		avivo_crtc_load_lut(crtc);
	else
		legacy_crtc_load_lut(crtc);
}

/** Sets the color ramps on behalf of fbcon */
void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			      u16 blue, int regno)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	radeon_crtc->lut_r[regno] = red >> 6;
	radeon_crtc->lut_g[regno] = green >> 6;
	radeon_crtc->lut_b[regno] = blue >> 6;
}

/** Gets the color ramps on behalf of fbcon */
void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, int regno)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	*red = radeon_crtc->lut_r[regno] << 6;
	*green = radeon_crtc->lut_g[regno] << 6;
	*blue = radeon_crtc->lut_b[regno] << 6;
}
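
/* drm_crtc_funcs.gamma_set hook: truncate the 16-bit per-channel ramp
 * supplied by userspace to the 10-bit hardware LUT and reload it.
 */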
static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				  u16 *blue, uint32_t start, uint32_t size)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	int end = (start + size > 256) ? 256 : start + size, i;

	/* userspace palettes are always correct as is */
	for (i = start; i < end; i++) {
		radeon_crtc->lut_r[i] = red[i] >> 6;
		radeon_crtc->lut_g[i] = green[i] >> 6;
		radeon_crtc->lut_b[i] = blue[i] >> 6;
	}
	radeon_crtc_load_lut(crtc);
}

static void radeon_crtc_destroy(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

	drm_crtc_cleanup(crtc);
	destroy_workqueue(radeon_crtc->flip_queue);
	kfree(radeon_crtc);
}

/**
 * radeon_unpin_work_func - unpin old buffer object
 *
 * @__work: kernel work item
 *
 * Unpin the old frame buffer object outside of the interrupt handler
 */
static void radeon_unpin_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = radeon_bo_reserve(work->old_rbo, false);
	if (likely(r == 0)) {
		r = radeon_bo_unpin(work->old_rbo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		radeon_bo_unreserve(work->old_rbo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
	kfree(work);
}

void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	unsigned long flags;
	u32 update_pending;
	int vpos, hpos;

	/* can happen during initialization */
	if (radeon_crtc == NULL)
		return;

	/* Skip the pageflip completion check below (based on polling) on
	 * asics which reliably support hw pageflip completion irqs. pflip
	 * irqs are a reliable and race-free method of handling pageflip
	 * completion detection. A use_pflipirq module parameter < 2 allows
	 * overriding this in case of asics with faulty pflip irqs.
	 * A module parameter of 0 would only use this polling based path,
	 * a parameter of 1 would use pflip irq only as a backup to this
	 * path, as in Linux 3.16.
	 */
	if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
		return;

	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	update_pending = radeon_page_flip_pending(rdev, crtc_id);

	/* Has the pageflip already completed in crtc, or is it certain
	 * to complete in this vblank?
	 */
	if (update_pending &&
	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
							       &vpos, &hpos, NULL, NULL)) &&
	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
		/* crtc didn't flip in this target vblank interval,
		 * but flip is pending in crtc. Based on the current
		 * scanout position we know that the current frame is
		 * (nearly) complete and the flip will (likely)
		 * complete before the start of the next frame.
		 */
		update_pending = 0;
	}
	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
	if (!update_pending)
		radeon_crtc_handle_flip(rdev, crtc_id);
}

/**
 * radeon_crtc_handle_flip - page flip completed
 *
 * @rdev: radeon device pointer
 * @crtc_id: crtc number this event is for
 *
 * Called when we are sure that a page flip for this crtc is completed.
 */
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	struct radeon_flip_work *work;
	unsigned long flags;

	/* this can happen at init */
	if (radeon_crtc == NULL)
		return;

	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
	work = radeon_crtc->flip_work;
	if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
				 "RADEON_FLIP_SUBMITTED(%d)\n",
				 radeon_crtc->flip_status,
				 RADEON_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
		return;
	}

	/* Pageflip completed. Clean up. */
	radeon_crtc->flip_status = RADEON_FLIP_NONE;
	radeon_crtc->flip_work = NULL;

	/* wakeup userspace */
	if (work->event)
		drm_send_vblank_event(rdev->ddev, crtc_id, work->event);

	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);

	drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
	radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
	queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}

/**
 * radeon_flip_work_func - page flip framebuffer
 *
 * @__work: kernel work item
 *
 * Wait for the buffer object to become idle and do the actual page flip
 */
static void radeon_flip_work_func(struct work_struct *__work)
{
	struct radeon_flip_work *work =
		container_of(__work, struct radeon_flip_work, flip_work);
	struct radeon_device *rdev = work->rdev;
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &radeon_crtc->base;
	unsigned long flags;
	int r;

	down_read(&rdev->exclusive_lock);
	if (work->fence) {
		struct radeon_fence *fence;

		fence = to_radeon_fence(work->fence);
		if (fence && fence->rdev == rdev) {
			r = radeon_fence_wait(fence, false);
			if (r == -EDEADLK) {
				up_read(&rdev->exclusive_lock);
				do {
					r = radeon_gpu_reset(rdev);
				} while (r == -EAGAIN);
				down_read(&rdev->exclusive_lock);
			}
		} else
			r = fence_wait(work->fence, false);

		if (r)
			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);

		/* We continue with the page flip even if we failed to wait on
		 * the fence, otherwise the DRM core and userspace will be
		 * confused about which BO the CRTC is scanning out
		 */

		fence_put(work->fence);
		work->fence = NULL;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* set the proper interrupt */
	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);

	/* do the flip (mmio) */
	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);

	radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	up_read(&rdev->exclusive_lock);
}
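
/**
 * radeon_crtc_page_flip - queue a page flip on a crtc
 *
 * @crtc: crtc to flip on
 * @fb: framebuffer to flip to
 * @event: optional vblank event to deliver once the flip completes
 * @page_flip_flags: flags from the flip ioctl (not used here)
 *
 * Pins the new framebuffer, computes its scanout base address and hands
 * the prepared flip over to radeon_flip_work_func(), which programs the
 * hardware once the new buffer is idle.
 */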
static int radeon_crtc_page_flip(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_pending_vblank_event *event,
				 uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_framebuffer *old_radeon_fb;
	struct radeon_framebuffer *new_radeon_fb;
	struct drm_gem_object *obj;
	struct radeon_flip_work *work;
	struct radeon_bo *new_rbo;
	uint32_t tiling_flags, pitch_pixels;
	uint64_t base;
	unsigned long flags;
	int r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_WORK(&work->flip_work, radeon_flip_work_func);
	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);

	work->rdev = rdev;
	work->crtc_id = radeon_crtc->crtc_id;
	work->event = event;

	/* schedule unpin of the old buffer */
	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
	obj = old_radeon_fb->obj;

	/* take a reference to the old object */
	drm_gem_object_reference(obj);
	work->old_rbo = gem_to_radeon_bo(obj);

	new_radeon_fb = to_radeon_framebuffer(fb);
	obj = new_radeon_fb->obj;
	new_rbo = gem_to_radeon_bo(obj);

	/* pin the new buffer */
	DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
			 work->old_rbo, new_rbo);

	r = radeon_bo_reserve(new_rbo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
		goto cleanup;
	}
	/* Only 27 bit offset for legacy CRTC */
	r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
	if (unlikely(r != 0)) {
		radeon_bo_unreserve(new_rbo);
		r = -EINVAL;
		DRM_ERROR("failed to pin new rbo buffer before flip\n");
		goto cleanup;
	}
	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
	radeon_bo_unreserve(new_rbo);

	if (!ASIC_IS_AVIVO(rdev)) {
		/* crtc offset is from display base addr not FB location */
		base -= radeon_crtc->legacy_display_base_addr;
		pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);

		if (tiling_flags & RADEON_TILING_MACRO) {
			if (ASIC_IS_R300(rdev)) {
				base &= ~0x7ff;
			} else {
				int byteshift = fb->bits_per_pixel >> 4;
				int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
			}
		} else {
			int offset = crtc->y * pitch_pixels + crtc->x;
			switch (fb->bits_per_pixel) {
			case 8:
			default:
				offset *= 1;
				break;
			case 15:
			case 16:
				offset *= 2;
				break;
			case 24:
				offset *= 3;
				break;
			case 32:
				offset *= 4;
				break;
			}
			base += offset;
		}
		base &= ~7;
	}
	work->base = base;

	r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
	if (r) {
		DRM_ERROR("failed to get vblank before flip\n");
		goto pflip_cleanup;
	}

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto vblank_cleanup;
	}
	radeon_crtc->flip_status = RADEON_FLIP_PENDING;
	radeon_crtc->flip_work = work;

	/* update crtc fb */
	crtc->primary->fb = fb;

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
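
	/* The MMIO flip itself is deferred to radeon_flip_work_func(), which
	 * waits for the new buffer to become idle before programming the
	 * new base address.
	 */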
	queue_work(radeon_crtc->flip_queue, &work->flip_work);
	return 0;

vblank_cleanup:
	drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);

pflip_cleanup:
	if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
		DRM_ERROR("failed to reserve new rbo in error path\n");
		goto cleanup;
	}
	if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
		DRM_ERROR("failed to unpin new rbo in error path\n");
	}
	radeon_bo_unreserve(new_rbo);

cleanup:
	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
	fence_put(work->fence);
	kfree(work);
	return r;
}

static int
radeon_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	rdev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	   take the current one */
	if (active && !rdev->have_disp_power_ref) {
		rdev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	   we got before */
	if (!active && rdev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		rdev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const struct drm_crtc_funcs radeon_crtc_funcs = {
	.cursor_set = radeon_crtc_cursor_set,
	.cursor_move = radeon_crtc_cursor_move,
	.gamma_set = radeon_crtc_gamma_set,
	.set_config = radeon_crtc_set_config,
	.destroy = radeon_crtc_destroy,
	.page_flip = radeon_crtc_page_flip,
};

static void radeon_crtc_init(struct drm_device *dev, int index)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc;
	int i;

	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (radeon_crtc == NULL)
		return;

	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
	radeon_crtc->crtc_id = index;
	radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
	rdev->mode_info.crtcs[index] = radeon_crtc;

	if (rdev->family >= CHIP_BONAIRE) {
		radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
		radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
	} else {
		radeon_crtc->max_cursor_width = CURSOR_WIDTH;
		radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
	}
	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;

#if 0
	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
	radeon_crtc->mode_set.num_connectors = 0;
#endif

	for (i = 0; i < 256; i++) {
		radeon_crtc->lut_r[i] = i << 2;
		radeon_crtc->lut_g[i] = i << 2;
		radeon_crtc->lut_b[i] = i << 2;
	}

	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
		radeon_atombios_init_crtc(dev, radeon_crtc);
	else
		radeon_legacy_init_crtc(dev, radeon_crtc);
}

static const char *encoder_names[38] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

static void radeon_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("Radeon Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO(" %s\n", connector->name);
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
		if (radeon_connector->ddc_bus) {
			DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 radeon_connector->ddc_bus->rec.mask_clk_reg,
				 radeon_connector->ddc_bus->rec.mask_data_reg,
				 radeon_connector->ddc_bus->rec.a_clk_reg,
				 radeon_connector->ddc_bus->rec.a_data_reg,
				 radeon_connector->ddc_bus->rec.en_clk_reg,
				 radeon_connector->ddc_bus->rec.en_data_reg,
				 radeon_connector->ddc_bus->rec.y_clk_reg,
				 radeon_connector->ddc_bus->rec.y_data_reg);
			if (radeon_connector->router.ddc_valid)
				DRM_INFO(" DDC Router 0x%x/0x%x\n",
					 radeon_connector->router.ddc_mux_control_pin,
					 radeon_connector->router.ddc_mux_state);
			if (radeon_connector->router.cd_valid)
				DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
					 radeon_connector->router.cd_mux_control_pin,
					 radeon_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO(" Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			radeon_encoder = to_radeon_encoder(encoder);
			devices = radeon_encoder->devices & radeon_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
			}
		}
		i++;
	}
}

static bool radeon_setup_enc_conn(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	bool ret = false;

	if (rdev->bios) {
		if (rdev->is_atom_bios) {
			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
			if (ret == false)
				ret = radeon_get_atom_connector_info_from_object_table(dev);
		} else {
			ret = radeon_get_legacy_connector_info_from_bios(dev);
			if (ret == false)
				ret = radeon_get_legacy_connector_info_from_table(dev);
		}
	} else {
		if (!ASIC_IS_AVIVO(rdev))
			ret = radeon_get_legacy_connector_info_from_table(dev);
	}
	if (ret) {
		radeon_setup_encoder_clones(dev);
		radeon_print_display_setup(dev);
	}

	return ret;
}

/* avivo */

/**
 * avivo_reduce_ratio - fractional number reduction
 *
 * @nom: nominator
 * @den: denominator
 * @nom_min: minimum value for nominator
 * @den_min: minimum value for denominator
 *
 * Find the greatest common divisor and apply it on both nominator and
 * denominator, but make sure nominator and denominator are at least as
 * large as their minimum values.
 */
static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
			       unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure nominator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}
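
/* Example: avivo_reduce_ratio(&n, &d, 4, 2) with n = 270 and d = 90 first
 * divides by gcd(270, 90) = 90, giving 3/1, and then scales by 2 to 6/2 so
 * that the nominator reaches its minimum of 4; the denominator minimum of 2
 * is then satisfied as well.
 */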
/**
 * avivo_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: nominator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider. Makes
 * sure we stay within the limits.
 */
static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				 unsigned fb_div_max, unsigned ref_div_max,
				 unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}

/**
 * radeon_compute_pll_avivo - compute PLL parameters
 *
 * @pll: information about the PLL
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void radeon_compute_pll_avivo(struct radeon_pll *pll,
			      u32 freq,
			      u32 *dot_clock_p,
			      u32 *fb_div_p,
			      u32 *frac_fb_div_p,
			      u32 *ref_div_p,
			      u32 *post_div_p)
{
	unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & RADEON_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & RADEON_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & RADEON_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}
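
	/* The search below reduces the ratio target_clock / reference_freq
	 * and then, for every post divider in the allowed range, derives
	 * matching feedback and reference dividers; the combination with
	 * the smallest frequency error is kept.
	 */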
	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;
		avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
				     ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
			     &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}

/* pre-avivo */
static inline uint32_t radeon_div(uint64_t n, uint32_t d)
{
	uint64_t mod;

	n += d / 2;

	mod = do_div(n, d);
	return n;
}

void radeon_compute_pll_legacy(struct radeon_pll *pll,
			       uint64_t freq,
			       uint32_t *dot_clock_p,
			       uint32_t *fb_div_p,
			       uint32_t *frac_fb_div_p,
			       uint32_t *ref_div_p,
			       uint32_t *post_div_p)
{
	uint32_t min_ref_div = pll->min_ref_div;
	uint32_t max_ref_div = pll->max_ref_div;
	uint32_t min_post_div = pll->min_post_div;
	uint32_t max_post_div = pll->max_post_div;
	uint32_t min_fractional_feed_div = 0;
	uint32_t max_fractional_feed_div = 0;
	uint32_t best_vco = pll->best_vco;
	uint32_t best_post_div = 1;
	uint32_t best_ref_div = 1;
	uint32_t best_feedback_div = 1;
	uint32_t best_frac_feedback_div = 0;
	uint32_t best_freq = -1;
	uint32_t best_error = 0xffffffff;
	uint32_t best_vco_diff = 1;
	uint32_t post_div;
	u32 pll_out_min, pll_out_max;

	DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
	freq = freq * 1000;

	if (pll->flags & RADEON_PLL_IS_LCD) {
		pll_out_min = pll->lcd_pll_out_min;
		pll_out_max = pll->lcd_pll_out_max;
	} else {
		pll_out_min = pll->pll_out_min;
		pll_out_max = pll->pll_out_max;
	}

	if (pll_out_min > 64800)
		pll_out_min = 64800;
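
	/* Narrow the reference divider range: either honor the value forced
	 * via RADEON_PLL_USE_REF_DIV, or bisect until reference_freq / ref_div
	 * falls within [pll_in_min, pll_in_max].
	 */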
	if (pll->flags & RADEON_PLL_USE_REF_DIV)
		min_ref_div = max_ref_div = pll->reference_div;
	else {
		while (min_ref_div < max_ref_div-1) {
			uint32_t mid = (min_ref_div + max_ref_div) / 2;
			uint32_t pll_in = pll->reference_freq / mid;
			if (pll_in < pll->pll_in_min)
				max_ref_div = mid;
			else if (pll_in > pll->pll_in_max)
				min_ref_div = mid;
			else
				break;
		}
	}

	if (pll->flags & RADEON_PLL_USE_POST_DIV)
		min_post_div = max_post_div = pll->post_div;

	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		min_fractional_feed_div = pll->min_frac_feedback_div;
		max_fractional_feed_div = pll->max_frac_feedback_div;
	}

	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
		uint32_t ref_div;

		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
			continue;

		/* legacy radeons only have a few post_divs */
		if (pll->flags & RADEON_PLL_LEGACY) {
			if ((post_div == 5) ||
			    (post_div == 7) ||
			    (post_div == 9) ||
			    (post_div == 10) ||
			    (post_div == 11) ||
			    (post_div == 13) ||
			    (post_div == 14) ||
			    (post_div == 15))
				continue;
		}

		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
			uint32_t feedback_div, current_freq = 0, error, vco_diff;
			uint32_t pll_in = pll->reference_freq / ref_div;
			uint32_t min_feed_div = pll->min_feedback_div;
			uint32_t max_feed_div = pll->max_feedback_div + 1;

			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
				continue;

			while (min_feed_div < max_feed_div) {
				uint32_t vco;
				uint32_t min_frac_feed_div = min_fractional_feed_div;
				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
				uint32_t frac_feedback_div;
				uint64_t tmp;

				feedback_div = (min_feed_div + max_feed_div) / 2;

				tmp = (uint64_t)pll->reference_freq * feedback_div;
				vco = radeon_div(tmp, ref_div);

				if (vco < pll_out_min) {
					min_feed_div = feedback_div + 1;
					continue;
				} else if (vco > pll_out_max) {
					max_feed_div = feedback_div;
					continue;
				}

				while (min_frac_feed_div < max_frac_feed_div) {
					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
					current_freq = radeon_div(tmp, ref_div * post_div);

					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
						if (freq < current_freq)
							error = 0xffffffff;
						else
							error = freq - current_freq;
					} else
						error = abs(current_freq - freq);
					vco_diff = abs(vco - best_vco);

					if ((best_vco == 0 && error < best_error) ||
					    (best_vco != 0 &&
					     ((best_error > 100 && error < best_error - 100) ||
					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
						best_post_div = post_div;
						best_ref_div = ref_div;
						best_feedback_div = feedback_div;
						best_frac_feedback_div = frac_feedback_div;
						best_freq = current_freq;
						best_error = error;
						best_vco_diff = vco_diff;
					} else if (current_freq == freq) {
						if (best_freq == -1) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						}
					}
					if (current_freq < freq)
						min_frac_feed_div = frac_feedback_div + 1;
					else
						max_frac_feed_div = frac_feedback_div;
				}
				if (current_freq < freq)
					min_feed_div = feedback_div + 1;
				else
					max_feed_div = feedback_div;
			}
		}
	}

	*dot_clock_p = best_freq / 10000;
	*fb_div_p = best_feedback_div;
	*frac_fb_div_p = best_frac_feedback_div;
	*ref_div_p = best_ref_div;
	*post_div_p = best_post_div;
	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      (long long)freq,
		      best_freq / 1000, best_feedback_div, best_frac_feedback_div,
		      best_ref_div, best_post_div);

}

static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

	if (radeon_fb->obj) {
		drm_gem_object_unreference_unlocked(radeon_fb->obj);
	}
	drm_framebuffer_cleanup(fb);
	kfree(radeon_fb);
}

static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

	return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
}

static const struct drm_framebuffer_funcs radeon_fb_funcs = {
	.destroy = radeon_user_framebuffer_destroy,
	.create_handle = radeon_user_framebuffer_create_handle,
};

int
radeon_framebuffer_init(struct drm_device *dev,
			struct radeon_framebuffer *rfb,
			struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *obj)
{
	int ret;
	rfb->obj = obj;
	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
	if (ret) {
		rfb->obj = NULL;
		return ret;
	}
	return 0;
}

static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct radeon_framebuffer *radeon_fb;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
	if (radeon_fb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
	if (ret) {
		kfree(radeon_fb);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &radeon_fb->base;
}

static void radeon_output_poll_changed(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	radeon_fb_output_poll_changed(rdev);
}

static const struct drm_mode_config_funcs radeon_mode_funcs = {
	.fb_create = radeon_user_framebuffer_create,
	.output_poll_changed = radeon_output_poll_changed
};

static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{	{ 0, "driver" },
	{ 1, "bios" },
};

static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{	{ TV_STD_NTSC, "ntsc" },
	{ TV_STD_PAL, "pal" },
	{ TV_STD_PAL_M, "pal-m" },
	{ TV_STD_PAL_60, "pal-60" },
	{ TV_STD_NTSC_J, "ntsc-j" },
	{ TV_STD_SCART_PAL, "scart-pal" },
	{ TV_STD_PAL_CN, "pal-cn" },
	{ TV_STD_SECAM, "secam" },
};

static struct drm_prop_enum_list radeon_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static struct drm_prop_enum_list radeon_audio_enum_list[] =
{	{ RADEON_AUDIO_DISABLE, "off" },
	{ RADEON_AUDIO_ENABLE, "on" },
	{ RADEON_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static struct drm_prop_enum_list radeon_dither_enum_list[] =
{	{ RADEON_FMT_DITHER_DISABLE, "off" },
	{ RADEON_FMT_DITHER_ENABLE, "on" },
};

static int radeon_modeset_create_props(struct radeon_device *rdev)
{
	int sz;

	if (rdev->is_atom_bios) {
		rdev->mode_info.coherent_mode_property =
			drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
		if (!rdev->mode_info.coherent_mode_property)
			return -ENOMEM;
	}

	if (!ASIC_IS_AVIVO(rdev)) {
		sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
		rdev->mode_info.tmds_pll_property =
			drm_property_create_enum(rdev->ddev, 0,
						 "tmds_pll",
						 radeon_tmds_pll_enum_list, sz);
	}

	rdev->mode_info.load_detect_property =
		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
	if (!rdev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(rdev->ddev);

	sz = ARRAY_SIZE(radeon_tv_std_enum_list);
	rdev->mode_info.tv_std_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "tv standard",
					 radeon_tv_std_enum_list, sz);

	sz = ARRAY_SIZE(radeon_underscan_enum_list);
	rdev->mode_info.underscan_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "underscan",
					 radeon_underscan_enum_list, sz);

	rdev->mode_info.underscan_hborder_property =
		drm_property_create_range(rdev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!rdev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	rdev->mode_info.underscan_vborder_property =
		drm_property_create_range(rdev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!rdev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(radeon_audio_enum_list);
	rdev->mode_info.audio_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "audio",
					 radeon_audio_enum_list, sz);

	sz = ARRAY_SIZE(radeon_dither_enum_list);
	rdev->mode_info.dither_property =
		drm_property_create_enum(rdev->ddev, 0,
					 "dither",
					 radeon_dither_enum_list, sz);

	return 0;
}

void radeon_update_display_priority(struct radeon_device *rdev)
{
	/* adjustment options for the display watermarks */
	if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
		/* set display priority to high for r3xx, rv515 chips
		 * this avoids flickering due to underflow to the
		 * display controllers during heavy acceleration.
		 * Don't force high on rs4xx igp chips as it seems to
		 * affect the sound card. See kernel bug 15982.
		 */
		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
		    !(rdev->flags & RADEON_IS_IGP))
			rdev->disp_priority = 2;
		else
			rdev->disp_priority = 0;
	} else
		rdev->disp_priority = radeon_disp_priority;

}

/*
 * Allocate hdmi structs and determine register offsets
 */
static void radeon_afmt_init(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
		rdev->mode_info.afmt[i] = NULL;

	if (ASIC_IS_NODCE(rdev)) {
		/* nothing to do */
	} else if (ASIC_IS_DCE4(rdev)) {
		static uint32_t eg_offsets[] = {
			EVERGREEN_CRTC0_REGISTER_OFFSET,
			EVERGREEN_CRTC1_REGISTER_OFFSET,
			EVERGREEN_CRTC2_REGISTER_OFFSET,
			EVERGREEN_CRTC3_REGISTER_OFFSET,
			EVERGREEN_CRTC4_REGISTER_OFFSET,
			EVERGREEN_CRTC5_REGISTER_OFFSET,
			0x13830 - 0x7030,
		};
		int num_afmt;

		/* DCE8 has 7 audio blocks tied to DIG encoders */
		/* DCE6 has 6 audio blocks tied to DIG encoders */
		/* DCE4/5 has 6 audio blocks tied to DIG encoders */
		/* DCE4.1 has 2 audio blocks tied to DIG encoders */
		if (ASIC_IS_DCE8(rdev))
			num_afmt = 7;
		else if (ASIC_IS_DCE6(rdev))
			num_afmt = 6;
		else if (ASIC_IS_DCE5(rdev))
			num_afmt = 6;
		else if (ASIC_IS_DCE41(rdev))
			num_afmt = 2;
		else /* DCE4 */
			num_afmt = 6;

		BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
		for (i = 0; i < num_afmt; i++) {
			rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
			if (rdev->mode_info.afmt[i]) {
				rdev->mode_info.afmt[i]->offset = eg_offsets[i];
				rdev->mode_info.afmt[i]->id = i;
			}
		}
	} else if (ASIC_IS_DCE3(rdev)) {
		/* DCE3.x has 2 audio blocks tied to DIG encoders */
		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[0]) {
			rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
			rdev->mode_info.afmt[0]->id = 0;
		}
		rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[1]) {
			rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
			rdev->mode_info.afmt[1]->id = 1;
		}
	} else if (ASIC_IS_DCE2(rdev)) {
		/* DCE2 has at least 1 routable audio block */
		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
		if (rdev->mode_info.afmt[0]) {
			rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
			rdev->mode_info.afmt[0]->id = 0;
		}
		/* r6xx has 2 routable audio blocks */
		if (rdev->family >= CHIP_R600) {
			rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
			if (rdev->mode_info.afmt[1]) {
				rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
				rdev->mode_info.afmt[1]->id = 1;
			}
		}
	}
}

static void radeon_afmt_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
		kfree(rdev->mode_info.afmt[i]);
		rdev->mode_info.afmt[i] = NULL;
	}
}

int radeon_modeset_init(struct radeon_device *rdev)
{
	int i;
	int ret;

	drm_mode_config_init(rdev->ddev);
	rdev->mode_info.mode_config_initialized = true;

	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;

	if (ASIC_IS_DCE5(rdev)) {
		rdev->ddev->mode_config.max_width = 16384;
		rdev->ddev->mode_config.max_height = 16384;
	} else if (ASIC_IS_AVIVO(rdev)) {
		rdev->ddev->mode_config.max_width = 8192;
		rdev->ddev->mode_config.max_height = 8192;
	} else {
		rdev->ddev->mode_config.max_width = 4096;
		rdev->ddev->mode_config.max_height = 4096;
	}

	rdev->ddev->mode_config.preferred_depth = 24;
	rdev->ddev->mode_config.prefer_shadow = 1;

	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;

	ret = radeon_modeset_create_props(rdev);
	if (ret) {
		return ret;
	}

	/* init i2c buses */
	radeon_i2c_init(rdev);

	/* check combios for a valid hardcoded EDID - Sun servers */
	if (!rdev->is_atom_bios) {
		/* check for hardcoded EDID in BIOS */
		radeon_combios_check_hardcoded_edid(rdev);
	}

	/* allocate crtcs */
	for (i = 0; i < rdev->num_crtc; i++) {
		radeon_crtc_init(rdev->ddev, i);
	}

	/* okay we should have all the bios connectors */
	ret = radeon_setup_enc_conn(rdev->ddev);
	if (!ret) {
		return ret;
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}

	/* initialize hpd */
	radeon_hpd_init(rdev);

	/* setup afmt */
	radeon_afmt_init(rdev);

	radeon_fbdev_init(rdev);
	drm_kms_helper_poll_init(rdev->ddev);

	if (rdev->pm.dpm_enabled) {
		/* do dpm late init */
		ret = radeon_pm_late_init(rdev);
		if (ret) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
		/* set the dpm state for PX since there won't be
		 * a modeset to call this.
		 */
		radeon_pm_compute_clocks(rdev);
	}

	return 0;
}

void radeon_modeset_fini(struct radeon_device *rdev)
{
	radeon_fbdev_fini(rdev);
	kfree(rdev->mode_info.bios_hardcoded_edid);

	if (rdev->mode_info.mode_config_initialized) {
		radeon_afmt_fini(rdev);
		drm_kms_helper_poll_fini(rdev->ddev);
		radeon_hpd_fini(rdev);
		drm_mode_config_cleanup(rdev->ddev);
		rdev->mode_info.mode_config_initialized = false;
	}
	/* free i2c buses */
	radeon_i2c_fini(rdev);
}

static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_encoder *radeon_encoder;
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	bool first = true;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	radeon_crtc->h_border = 0;
	radeon_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		radeon_encoder = to_radeon_encoder(encoder);
		connector = radeon_get_connector_for_encoder(encoder);
		radeon_connector = to_radeon_connector(connector);

		if (first) {
			/* set scaling */
			if (radeon_encoder->rmx_type == RMX_OFF)
				radeon_crtc->rmx_type = RMX_OFF;
			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
			else
				radeon_crtc->rmx_type = RMX_OFF;
			/* copy native mode */
			memcpy(&radeon_crtc->native_mode,
			       &radeon_encoder->native_mode,
			       sizeof(struct drm_display_mode));
			src_v = crtc->mode.vdisplay;
			dst_v = radeon_crtc->native_mode.vdisplay;
			src_h = crtc->mode.hdisplay;
			dst_h = radeon_crtc->native_mode.hdisplay;

			/* fix up for overscan on hdmi */
			if (ASIC_IS_AVIVO(rdev) &&
			    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
			      drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
			      is_hdtv_mode(mode)))) {
				if (radeon_encoder->underscan_hborder != 0)
					radeon_crtc->h_border = radeon_encoder->underscan_hborder;
				else
					radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
				if (radeon_encoder->underscan_vborder != 0)
					radeon_crtc->v_border = radeon_encoder->underscan_vborder;
				else
					radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
				radeon_crtc->rmx_type = RMX_FULL;
				src_v = crtc->mode.vdisplay;
				dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
				src_h = crtc->mode.hdisplay;
				dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
			}
			first = false;
		} else {
			if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
				/* WARNING: Right now this can't happen but
				 * in the future we need to check that scaling
				 * is consistent across the different encoders
				 * (i.e. all encoders can work with the same
				 * scaling).
				 */
				DRM_ERROR("Scaling not consistent across encoder.\n");
				return false;
			}
		}
	}
	if (radeon_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;
		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		radeon_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		radeon_crtc->hsc.full = dfixed_div(a, b);
	} else {
		radeon_crtc->vsc.full = dfixed_const(1);
		radeon_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when query happened.
 *
 * \param dev Device to query.
 * \param crtc Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 *
 */
int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
			       int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct radeon_device *rdev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
			       int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct radeon_device *rdev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (ASIC_IS_DCE4(rdev)) {
		if (crtc == 0) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC0_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC0_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC1_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC1_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 2) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC2_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC2_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 3) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC3_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC3_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 4) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC4_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC4_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 5) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC5_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC5_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		if (crtc == 0) {
			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else {
		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
		if (crtc == 0) {
			/* Assume vbl_end == 0, get vbl_start from
			 * upper 16 bits.
			 */
			vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
			       RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
			position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
			       RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
	}
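
	/*
	 * On success "position" now carries the current vertical line in
	 * bits 0-12 and the current horizontal position in bits 16-28
	 * (forced to 0 on pre-AVIVO), while "vbl" carries the vblank start
	 * line in bits 0-12 and the vblank end line in bits 16-28.
	 */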

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
		vbl_end = 0;
	}

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >= 0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Is vpos outside nominal vblank area, but less than
	 * 1/100 of a frame height away from start of vblank?
	 * If so, assume this isn't a massively delayed vblank
	 * interrupt, but a vblank interrupt that fired a few
	 * microseconds before true start of vblank. Compensate
	 * by adding a full frame duration to the final timestamp.
	 * Happens, e.g., on ATI R500, R600.
	 *
	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
	 */
	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;

		if (vbl_start - *vpos < vtotal / 100) {
			*vpos -= vtotal;

			/* Signal this correction as "applied". */
			ret |= 0x8;
		}
	}

	return ret;
}
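
/*
 * Worked example for the early-vblank-irq compensation in
 * radeon_get_crtc_scanoutpos() (illustrative 1080p timings, not taken
 * from real hardware): with crtc_vdisplay = 1080 and crtc_vtotal = 1125,
 * an irq that fires at vpos = 1076, i.e. 4 lines before vblank start,
 * satisfies 1080 - 1076 < 1125 / 100, so vpos is rewritten to
 * 1076 - 1125 = -49 and bit 0x8 is set in the return value to signal
 * that the correction was applied.
 */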