1 /*
2  * Copyright 2007-8 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Dave Airlie
24  *          Alex Deucher
25  */
26 #include <drm/drmP.h>
27 #include <drm/radeon_drm.h>
28 #include "radeon.h"
29 
30 #include "atom.h"
31 #include <asm/div64.h>
32 
33 #include <linux/pm_runtime.h>
34 #include <drm/drm_crtc_helper.h>
35 #include <drm/drm_edid.h>
36 
37 #include <linux/gcd.h>
38 
39 static void avivo_crtc_load_lut(struct drm_crtc *crtc)
40 {
41 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
42 	struct drm_device *dev = crtc->dev;
43 	struct radeon_device *rdev = dev->dev_private;
44 	int i;
45 
46 	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
47 	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
48 
49 	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
50 	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
51 	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
52 
53 	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
54 	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
55 	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
56 
57 	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
58 	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
59 	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
60 
61 	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
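	/*
	 * Each palette entry below packs three 10-bit channels into one
	 * 30-bit word: red in bits 29:20, green in 19:10, blue in 9:0.
	 * Purely as an illustration, lut_r = 0x3ff, lut_g = 0x200,
	 * lut_b = 0x000 would be written as
	 * (0x3ff << 20) | (0x200 << 10) | 0x000 = 0x3ff80000.
	 */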
62 	for (i = 0; i < 256; i++) {
63 		WREG32(AVIVO_DC_LUT_30_COLOR,
64 			     (radeon_crtc->lut_r[i] << 20) |
65 			     (radeon_crtc->lut_g[i] << 10) |
66 			     (radeon_crtc->lut_b[i] << 0));
67 	}
68 
69 	WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
70 }
71 
72 static void dce4_crtc_load_lut(struct drm_crtc *crtc)
73 {
74 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
75 	struct drm_device *dev = crtc->dev;
76 	struct radeon_device *rdev = dev->dev_private;
77 	int i;
78 
79 	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
80 	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
81 
82 	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
83 	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
84 	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
85 
86 	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
87 	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
88 	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
89 
90 	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
91 	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
92 
93 	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
94 	for (i = 0; i < 256; i++) {
95 		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
96 		       (radeon_crtc->lut_r[i] << 20) |
97 		       (radeon_crtc->lut_g[i] << 10) |
98 		       (radeon_crtc->lut_b[i] << 0));
99 	}
100 }
101 
102 static void dce5_crtc_load_lut(struct drm_crtc *crtc)
103 {
104 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
105 	struct drm_device *dev = crtc->dev;
106 	struct radeon_device *rdev = dev->dev_private;
107 	int i;
108 
109 	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
110 
111 	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
112 	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
113 		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
114 	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
115 	       NI_GRPH_PRESCALE_BYPASS);
116 	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
117 	       NI_OVL_PRESCALE_BYPASS);
118 	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
119 	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
120 		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
121 
122 	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
123 
124 	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
125 	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
126 	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
127 
128 	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
129 	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
130 	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
131 
132 	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
133 	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
134 
135 	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
136 	for (i = 0; i < 256; i++) {
137 		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
138 		       (radeon_crtc->lut_r[i] << 20) |
139 		       (radeon_crtc->lut_g[i] << 10) |
140 		       (radeon_crtc->lut_b[i] << 0));
141 	}
142 
143 	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
144 	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
145 		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
146 		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
147 		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
148 	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
149 	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
150 		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
151 	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
152 	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
153 		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
154 	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
155 	       (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
156 		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
157 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
158 	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
159 	if (ASIC_IS_DCE8(rdev)) {
160 		/* XXX this only needs to be programmed once per crtc at startup,
161 		 * not sure where the best place for it is
162 		 */
163 		WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
164 		       CIK_CURSOR_ALPHA_BLND_ENA);
165 	}
166 }
167 
168 static void legacy_crtc_load_lut(struct drm_crtc *crtc)
169 {
170 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
171 	struct drm_device *dev = crtc->dev;
172 	struct radeon_device *rdev = dev->dev_private;
173 	int i;
174 	uint32_t dac2_cntl;
175 
176 	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
177 	if (radeon_crtc->crtc_id == 0)
178 		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
179 	else
180 		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
181 	WREG32(RADEON_DAC_CNTL2, dac2_cntl);
182 
183 	WREG8(RADEON_PALETTE_INDEX, 0);
184 	for (i = 0; i < 256; i++) {
185 		WREG32(RADEON_PALETTE_30_DATA,
186 			     (radeon_crtc->lut_r[i] << 20) |
187 			     (radeon_crtc->lut_g[i] << 10) |
188 			     (radeon_crtc->lut_b[i] << 0));
189 	}
190 }
191 
192 void radeon_crtc_load_lut(struct drm_crtc *crtc)
193 {
194 	struct drm_device *dev = crtc->dev;
195 	struct radeon_device *rdev = dev->dev_private;
196 
197 	if (!crtc->enabled)
198 		return;
199 
200 	if (ASIC_IS_DCE5(rdev))
201 		dce5_crtc_load_lut(crtc);
202 	else if (ASIC_IS_DCE4(rdev))
203 		dce4_crtc_load_lut(crtc);
204 	else if (ASIC_IS_AVIVO(rdev))
205 		avivo_crtc_load_lut(crtc);
206 	else
207 		legacy_crtc_load_lut(crtc);
208 }
209 
210 /** Sets the color ramps on behalf of fbcon */
211 void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
212 			      u16 blue, int regno)
213 {
214 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
215 
216 	radeon_crtc->lut_r[regno] = red >> 6;
217 	radeon_crtc->lut_g[regno] = green >> 6;
218 	radeon_crtc->lut_b[regno] = blue >> 6;
219 }
220 
221 /** Gets the color ramps on behalf of fbcon */
222 void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
223 			      u16 *blue, int regno)
224 {
225 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
226 
227 	*red = radeon_crtc->lut_r[regno] << 6;
228 	*green = radeon_crtc->lut_g[regno] << 6;
229 	*blue = radeon_crtc->lut_b[regno] << 6;
230 }
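/*
 * fbcon works with 16-bit ramp values while the hardware LUT stores 10 bits
 * per channel, hence the >> 6 / << 6 in the two helpers above.  The round
 * trip is lossy in the low 6 bits; e.g. 0xffff is stored as 0x3ff and reads
 * back as 0xffc0.
 */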
231 
232 static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
233 				  u16 *blue, uint32_t start, uint32_t size)
234 {
235 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
236 	int end = (start + size > 256) ? 256 : start + size, i;
237 
238 	/* userspace palettes are always correct as is */
239 	for (i = start; i < end; i++) {
240 		radeon_crtc->lut_r[i] = red[i] >> 6;
241 		radeon_crtc->lut_g[i] = green[i] >> 6;
242 		radeon_crtc->lut_b[i] = blue[i] >> 6;
243 	}
244 	radeon_crtc_load_lut(crtc);
245 }
246 
247 static void radeon_crtc_destroy(struct drm_crtc *crtc)
248 {
249 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
250 
251 	drm_crtc_cleanup(crtc);
252 	destroy_workqueue(radeon_crtc->flip_queue);
253 	kfree(radeon_crtc);
254 }
255 
256 /**
257  * radeon_unpin_work_func - unpin old buffer object
258  *
259  * @__work: kernel work item
260  *
261  * Unpin the old frame buffer object outside of the interrupt handler
262  */
263 static void radeon_unpin_work_func(struct work_struct *__work)
264 {
265 	struct radeon_flip_work *work =
266 		container_of(__work, struct radeon_flip_work, unpin_work);
267 	int r;
268 
269 	/* unpin of the old buffer */
270 	r = radeon_bo_reserve(work->old_rbo, false);
271 	if (likely(r == 0)) {
272 		r = radeon_bo_unpin(work->old_rbo);
273 		if (unlikely(r != 0)) {
274 			DRM_ERROR("failed to unpin buffer after flip\n");
275 		}
276 		radeon_bo_unreserve(work->old_rbo);
277 	} else
278 		DRM_ERROR("failed to reserve buffer after flip\n");
279 
280 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
281 	kfree(work);
282 }
283 
284 void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
285 {
286 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
287 	struct radeon_flip_work *work;
288 	unsigned long flags;
289 	u32 update_pending;
290 	int vpos, hpos;
291 
292 	/* can happen during initialization */
293 	if (radeon_crtc == NULL)
294 		return;
295 
296 	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
297 	work = radeon_crtc->flip_work;
298 	if (work == NULL) {
299 		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
300 		return;
301 	}
302 
303 	update_pending = radeon_page_flip_pending(rdev, crtc_id);
304 
305 	/* Has the page flip already completed on this crtc, or is it
306 	 * certain to complete in this vblank?
307 	 */
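	/* For example, with a 1080-line mode the test below considers the
	 * flip safe once the scanout has reached line 1069 (99% of 1080),
	 * or, on pre-AVIVO parts only, once vpos has gone negative
	 * (i.e. we are already inside vblank).
	 */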
308 	if (update_pending &&
309 	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
310 							       &vpos, &hpos, NULL, NULL)) &&
311 	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
312 	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
313 		/* crtc didn't flip in this target vblank interval,
314 		 * but a flip is still pending on this crtc. Based on the current
315 		 * scanout position we know that the current frame is
316 		 * (nearly) complete and the flip will (likely)
317 		 * complete before the start of the next frame.
318 		 */
319 		update_pending = 0;
320 	}
321 	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
322 	if (!update_pending)
323 		radeon_crtc_handle_flip(rdev, crtc_id);
324 }
325 
326 /**
327  * radeon_crtc_handle_flip - page flip completed
328  *
329  * @rdev: radeon device pointer
330  * @crtc_id: crtc number this event is for
331  *
332  * Called when we are sure that the page flip for this crtc has completed.
333  */
334 void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
335 {
336 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
337 	struct radeon_flip_work *work;
338 	unsigned long flags;
339 
340 	/* this can happen at init */
341 	if (radeon_crtc == NULL)
342 		return;
343 
344 	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
345 	work = radeon_crtc->flip_work;
346 	if (work == NULL) {
347 		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
348 		return;
349 	}
350 
351 	/* Pageflip completed. Clean up. */
352 	radeon_crtc->flip_work = NULL;
353 
354 	/* wakeup userspace */
355 	if (work->event)
356 		drm_send_vblank_event(rdev->ddev, crtc_id, work->event);
357 
358 	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
359 
360 	radeon_fence_unref(&work->fence);
361 	radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
362 	queue_work(radeon_crtc->flip_queue, &work->unpin_work);
363 }
364 
365 /**
366  * radeon_flip_work_func - page flip framebuffer
367  *
368  * @__work: kernel work item
369  *
370  * Wait for the buffer object to become idle and do the actual page flip
371  */
372 static void radeon_flip_work_func(struct work_struct *__work)
373 {
374 	struct radeon_flip_work *work =
375 		container_of(__work, struct radeon_flip_work, flip_work);
376 	struct radeon_device *rdev = work->rdev;
377 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
378 
379 	struct drm_crtc *crtc = &radeon_crtc->base;
380 	struct drm_framebuffer *fb = work->fb;
381 
382 	uint32_t tiling_flags, pitch_pixels;
383 	uint64_t base;
384 
385 	unsigned long flags;
386 	int r;
387 
388 	down_read(&rdev->exclusive_lock);
389 	while (work->fence) {
390 		r = radeon_fence_wait(work->fence, false);
391 		if (r == -EDEADLK) {
392 			up_read(&rdev->exclusive_lock);
393 			r = radeon_gpu_reset(rdev);
394 			down_read(&rdev->exclusive_lock);
395 		}
396 
397 		if (r) {
398 			DRM_ERROR("failed to wait on page flip fence (%d)!\n",
399 				  r);
400 			goto cleanup;
401 		} else
402 			radeon_fence_unref(&work->fence);
403 	}
404 
405 	/* pin the new buffer */
406 	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
407 			 work->old_rbo, work->new_rbo);
408 
409 	r = radeon_bo_reserve(work->new_rbo, false);
410 	if (unlikely(r != 0)) {
411 		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
412 		goto cleanup;
413 	}
414 	/* Only 27 bit offset for legacy CRTC */
415 	r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
416 				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
417 	if (unlikely(r != 0)) {
418 		radeon_bo_unreserve(work->new_rbo);
419 		r = -EINVAL;
420 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
421 		goto cleanup;
422 	}
423 	radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
424 	radeon_bo_unreserve(work->new_rbo);
425 
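	/* Illustrative example (hypothetical numbers) for the pre-AVIVO path
	 * below: a linear 1920x1080 XRGB8888 framebuffer with a 7680 byte
	 * pitch panned to (x, y) = (8, 4) gives pitch_pixels = 7680 / 4 = 1920
	 * and adds a byte offset of (4 * 1920 + 8) * 4 = 30752 to the base,
	 * which is finally masked down to an 8-byte boundary by base &= ~7.
	 */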
426 	if (!ASIC_IS_AVIVO(rdev)) {
427 		/* crtc offset is from display base addr not FB location */
428 		base -= radeon_crtc->legacy_display_base_addr;
429 		pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
430 
431 		if (tiling_flags & RADEON_TILING_MACRO) {
432 			if (ASIC_IS_R300(rdev)) {
433 				base &= ~0x7ff;
434 			} else {
435 				int byteshift = fb->bits_per_pixel >> 4;
436 				int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
437 				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
438 			}
439 		} else {
440 			int offset = crtc->y * pitch_pixels + crtc->x;
441 			switch (fb->bits_per_pixel) {
442 			case 8:
443 			default:
444 				offset *= 1;
445 				break;
446 			case 15:
447 			case 16:
448 				offset *= 2;
449 				break;
450 			case 24:
451 				offset *= 3;
452 				break;
453 			case 32:
454 				offset *= 4;
455 				break;
456 			}
457 			base += offset;
458 		}
459 		base &= ~7;
460 	}
461 
462 	/* We borrow the event spin lock for protecting flip_work */
463 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
464 
465 	/* set the proper interrupt */
466 	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
467 
468 	/* do the flip (mmio) */
469 	radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
470 
471 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
472 	up_read(&rdev->exclusive_lock);
473 
474 	return;
475 
476 cleanup:
477 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
478 	radeon_fence_unref(&work->fence);
479 	kfree(work);
480 	up_read(&rdev->exclusive_lock);
481 }
482 
483 static int radeon_crtc_page_flip(struct drm_crtc *crtc,
484 				 struct drm_framebuffer *fb,
485 				 struct drm_pending_vblank_event *event,
486 				 uint32_t page_flip_flags)
487 {
488 	struct drm_device *dev = crtc->dev;
489 	struct radeon_device *rdev = dev->dev_private;
490 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
491 	struct radeon_framebuffer *old_radeon_fb;
492 	struct radeon_framebuffer *new_radeon_fb;
493 	struct drm_gem_object *obj;
494 	struct radeon_flip_work *work;
495 	unsigned long flags;
496 
497 	work = kzalloc(sizeof(*work), GFP_KERNEL);
498 	if (work == NULL)
499 		return -ENOMEM;
500 
501 	INIT_WORK(&work->flip_work, radeon_flip_work_func);
502 	INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
503 
504 	work->rdev = rdev;
505 	work->crtc_id = radeon_crtc->crtc_id;
506 	work->fb = fb;
507 	work->event = event;
508 
509 	/* schedule unpin of the old buffer */
510 	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
511 	obj = old_radeon_fb->obj;
512 
513 	/* take a reference to the old object */
514 	drm_gem_object_reference(obj);
515 	work->old_rbo = gem_to_radeon_bo(obj);
516 
517 	new_radeon_fb = to_radeon_framebuffer(fb);
518 	obj = new_radeon_fb->obj;
519 	work->new_rbo = gem_to_radeon_bo(obj);
520 
521 	spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
522 	if (work->new_rbo->tbo.sync_obj)
523 		work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
524 	spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
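	/* If the new BO is still busy we remember the fence attached to it
	 * here; radeon_flip_work_func() then waits for that fence (retrying
	 * after a GPU reset on -EDEADLK) before programming the flip.
	 */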
525 
526 	/* We borrow the event spin lock for protecting flip_work */
527 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
528 
529 	if (radeon_crtc->flip_work) {
530 		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
531 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
532 		drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
533 		radeon_fence_unref(&work->fence);
534 		kfree(work);
535 		return -EBUSY;
536 	}
537 	radeon_crtc->flip_work = work;
538 
539 	/* update crtc fb */
540 	crtc->primary->fb = fb;
541 
542 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
543 
544 	queue_work(radeon_crtc->flip_queue, &work->flip_work);
545 
546 	return 0;
547 }
548 
549 static int
550 radeon_crtc_set_config(struct drm_mode_set *set)
551 {
552 	struct drm_device *dev;
553 	struct radeon_device *rdev;
554 	struct drm_crtc *crtc;
555 	bool active = false;
556 	int ret;
557 
558 	if (!set || !set->crtc)
559 		return -EINVAL;
560 
561 	dev = set->crtc->dev;
562 
563 	ret = pm_runtime_get_sync(dev->dev);
564 	if (ret < 0)
565 		return ret;
566 
567 	ret = drm_crtc_helper_set_config(set);
568 
569 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
570 		if (crtc->enabled)
571 			active = true;
572 
573 	pm_runtime_mark_last_busy(dev->dev);
574 
575 	rdev = dev->dev_private;
576 	/* if we have active crtcs and we don't have a power ref,
577 	   take the current one */
578 	if (active && !rdev->have_disp_power_ref) {
579 		rdev->have_disp_power_ref = true;
580 		return ret;
581 	}
582 	/* if we have no active crtcs, then drop the power ref
583 	   we got before */
584 	if (!active && rdev->have_disp_power_ref) {
585 		pm_runtime_put_autosuspend(dev->dev);
586 		rdev->have_disp_power_ref = false;
587 	}
588 
589 	/* drop the power reference we got coming in here */
590 	pm_runtime_put_autosuspend(dev->dev);
591 	return ret;
592 }

593 static const struct drm_crtc_funcs radeon_crtc_funcs = {
594 	.cursor_set = radeon_crtc_cursor_set,
595 	.cursor_move = radeon_crtc_cursor_move,
596 	.gamma_set = radeon_crtc_gamma_set,
597 	.set_config = radeon_crtc_set_config,
598 	.destroy = radeon_crtc_destroy,
599 	.page_flip = radeon_crtc_page_flip,
600 };
601 
602 static void radeon_crtc_init(struct drm_device *dev, int index)
603 {
604 	struct radeon_device *rdev = dev->dev_private;
605 	struct radeon_crtc *radeon_crtc;
606 	int i;
607 
608 	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
609 	if (radeon_crtc == NULL)
610 		return;
611 
612 	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
613 
614 	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
615 	radeon_crtc->crtc_id = index;
616 	radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
617 	rdev->mode_info.crtcs[index] = radeon_crtc;
618 
619 	if (rdev->family >= CHIP_BONAIRE) {
620 		radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
621 		radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
622 	} else {
623 		radeon_crtc->max_cursor_width = CURSOR_WIDTH;
624 		radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
625 	}
626 	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
627 	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
628 
629 #if 0
630 	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
631 	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
632 	radeon_crtc->mode_set.num_connectors = 0;
633 #endif
634 
635 	for (i = 0; i < 256; i++) {
636 		radeon_crtc->lut_r[i] = i << 2;
637 		radeon_crtc->lut_g[i] = i << 2;
638 		radeon_crtc->lut_b[i] = i << 2;
639 	}
640 
641 	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
642 		radeon_atombios_init_crtc(dev, radeon_crtc);
643 	else
644 		radeon_legacy_init_crtc(dev, radeon_crtc);
645 }
646 
647 static const char *encoder_names[38] = {
648 	"NONE",
649 	"INTERNAL_LVDS",
650 	"INTERNAL_TMDS1",
651 	"INTERNAL_TMDS2",
652 	"INTERNAL_DAC1",
653 	"INTERNAL_DAC2",
654 	"INTERNAL_SDVOA",
655 	"INTERNAL_SDVOB",
656 	"SI170B",
657 	"CH7303",
658 	"CH7301",
659 	"INTERNAL_DVO1",
660 	"EXTERNAL_SDVOA",
661 	"EXTERNAL_SDVOB",
662 	"TITFP513",
663 	"INTERNAL_LVTM1",
664 	"VT1623",
665 	"HDMI_SI1930",
666 	"HDMI_INTERNAL",
667 	"INTERNAL_KLDSCP_TMDS1",
668 	"INTERNAL_KLDSCP_DVO1",
669 	"INTERNAL_KLDSCP_DAC1",
670 	"INTERNAL_KLDSCP_DAC2",
671 	"SI178",
672 	"MVPU_FPGA",
673 	"INTERNAL_DDI",
674 	"VT1625",
675 	"HDMI_SI1932",
676 	"DP_AN9801",
677 	"DP_DP501",
678 	"INTERNAL_UNIPHY",
679 	"INTERNAL_KLDSCP_LVTMA",
680 	"INTERNAL_UNIPHY1",
681 	"INTERNAL_UNIPHY2",
682 	"NUTMEG",
683 	"TRAVIS",
684 	"INTERNAL_VCE",
685 	"INTERNAL_UNIPHY3",
686 };
687 
688 static const char *hpd_names[6] = {
689 	"HPD1",
690 	"HPD2",
691 	"HPD3",
692 	"HPD4",
693 	"HPD5",
694 	"HPD6",
695 };
696 
697 static void radeon_print_display_setup(struct drm_device *dev)
698 {
699 	struct drm_connector *connector;
700 	struct radeon_connector *radeon_connector;
701 	struct drm_encoder *encoder;
702 	struct radeon_encoder *radeon_encoder;
703 	uint32_t devices;
704 	int i = 0;
705 
706 	DRM_INFO("Radeon Display Connectors\n");
707 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
708 		radeon_connector = to_radeon_connector(connector);
709 		DRM_INFO("Connector %d:\n", i);
710 		DRM_INFO("  %s\n", connector->name);
711 		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
712 			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
713 		if (radeon_connector->ddc_bus) {
714 			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
715 				 radeon_connector->ddc_bus->rec.mask_clk_reg,
716 				 radeon_connector->ddc_bus->rec.mask_data_reg,
717 				 radeon_connector->ddc_bus->rec.a_clk_reg,
718 				 radeon_connector->ddc_bus->rec.a_data_reg,
719 				 radeon_connector->ddc_bus->rec.en_clk_reg,
720 				 radeon_connector->ddc_bus->rec.en_data_reg,
721 				 radeon_connector->ddc_bus->rec.y_clk_reg,
722 				 radeon_connector->ddc_bus->rec.y_data_reg);
723 			if (radeon_connector->router.ddc_valid)
724 				DRM_INFO("  DDC Router 0x%x/0x%x\n",
725 					 radeon_connector->router.ddc_mux_control_pin,
726 					 radeon_connector->router.ddc_mux_state);
727 			if (radeon_connector->router.cd_valid)
728 				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
729 					 radeon_connector->router.cd_mux_control_pin,
730 					 radeon_connector->router.cd_mux_state);
731 		} else {
732 			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
733 			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
734 			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
735 			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
736 			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
737 			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
738 				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
739 		}
740 		DRM_INFO("  Encoders:\n");
741 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
742 			radeon_encoder = to_radeon_encoder(encoder);
743 			devices = radeon_encoder->devices & radeon_connector->devices;
744 			if (devices) {
745 				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
746 					DRM_INFO("    CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
747 				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
748 					DRM_INFO("    CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
749 				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
750 					DRM_INFO("    LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
751 				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
752 					DRM_INFO("    DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
753 				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
754 					DRM_INFO("    DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
755 				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
756 					DRM_INFO("    DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
757 				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
758 					DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
759 				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
760 					DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
761 				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
762 					DRM_INFO("    DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
763 				if (devices & ATOM_DEVICE_TV1_SUPPORT)
764 					DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
765 				if (devices & ATOM_DEVICE_CV_SUPPORT)
766 					DRM_INFO("    CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
767 			}
768 		}
769 		i++;
770 	}
771 }
772 
773 static bool radeon_setup_enc_conn(struct drm_device *dev)
774 {
775 	struct radeon_device *rdev = dev->dev_private;
776 	bool ret = false;
777 
778 	if (rdev->bios) {
779 		if (rdev->is_atom_bios) {
780 			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
781 			if (ret == false)
782 				ret = radeon_get_atom_connector_info_from_object_table(dev);
783 		} else {
784 			ret = radeon_get_legacy_connector_info_from_bios(dev);
785 			if (ret == false)
786 				ret = radeon_get_legacy_connector_info_from_table(dev);
787 		}
788 	} else {
789 		if (!ASIC_IS_AVIVO(rdev))
790 			ret = radeon_get_legacy_connector_info_from_table(dev);
791 	}
792 	if (ret) {
793 		radeon_setup_encoder_clones(dev);
794 		radeon_print_display_setup(dev);
795 	}
796 
797 	return ret;
798 }
799 
800 int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
801 {
802 	struct drm_device *dev = radeon_connector->base.dev;
803 	struct radeon_device *rdev = dev->dev_private;
804 	int ret = 0;
805 
806 	/* on hw with routers, select right port */
807 	if (radeon_connector->router.ddc_valid)
808 		radeon_router_select_ddc_port(radeon_connector);
809 
810 	if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
811 	    ENCODER_OBJECT_ID_NONE) {
812 		if (radeon_connector->ddc_bus->has_aux)
813 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
814 							      &radeon_connector->ddc_bus->aux.ddc);
815 	} else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
816 		   (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
817 		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
818 
819 		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
820 		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
821 		    radeon_connector->ddc_bus->has_aux)
822 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
823 							      &radeon_connector->ddc_bus->aux.ddc);
824 		else if (radeon_connector->ddc_bus && !radeon_connector->edid)
825 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
826 							      &radeon_connector->ddc_bus->adapter);
827 	} else {
828 		if (radeon_connector->ddc_bus && !radeon_connector->edid)
829 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
830 							      &radeon_connector->ddc_bus->adapter);
831 	}
832 
833 	if (!radeon_connector->edid) {
834 		if (rdev->is_atom_bios) {
835 			/* some laptops provide a hardcoded edid in rom for LCDs */
836 			if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
837 			     (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
838 				radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
839 		} else
840 			/* some servers provide a hardcoded edid in rom for KVMs */
841 			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
842 	}
843 	if (radeon_connector->edid) {
844 		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
845 		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
846 		drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
847 		return ret;
848 	}
849 	drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
850 	return 0;
851 }
852 
853 /* avivo */
854 
855 /**
856  * avivo_reduce_ratio - fractional number reduction
857  *
858  * @nom: numerator
859  * @den: denominator
860  * @nom_min: minimum value for the numerator
861  * @den_min: minimum value for the denominator
862  *
863  * Find the greatest common divisor and divide both numerator and
864  * denominator by it, while making sure each stays at least as large
865  * as its minimum value.
866  */
867 static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
868 			       unsigned nom_min, unsigned den_min)
869 {
870 	unsigned tmp;
871 
872 	/* reduce the numbers to a simpler ratio */
873 	tmp = gcd(*nom, *den);
874 	*nom /= tmp;
875 	*den /= tmp;
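	/* Example with illustrative numbers: a 65000 kHz target and a
	 * 27000 kHz reference start out as 65000/27000 and reduce via
	 * gcd = 1000 to 65/27; the checks below then scale both values
	 * back up if either one falls short of its minimum.
	 */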
876 
877 	/* make sure the numerator is large enough */
878 	if (*nom < nom_min) {
879 		tmp = DIV_ROUND_UP(nom_min, *nom);
880 		*nom *= tmp;
881 		*den *= tmp;
882 	}
883 
884 	/* make sure the denominator is large enough */
885 	if (*den < den_min) {
886 		tmp = DIV_ROUND_UP(den_min, *den);
887 		*nom *= tmp;
888 		*den *= tmp;
889 	}
890 }
891 
892 /**
893  * avivo_get_fb_ref_div - feedback and ref divider calculation
894  *
895  * @nom: numerator
896  * @den: denominator
897  * @post_div: post divider
898  * @fb_div_max: feedback divider maximum
899  * @ref_div_max: reference divider maximum
900  * @fb_div: resulting feedback divider
901  * @ref_div: resulting reference divider
902  *
903  * Calculate feedback and reference divider for a given post divider. Makes
904  * sure we stay within the limits.
905  */
906 static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
907 				 unsigned fb_div_max, unsigned ref_div_max,
908 				 unsigned *fb_div, unsigned *ref_div)
909 {
910 	/* limit reference * post divider to a maximum */
911 	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
912 
913 	/* get matching reference and feedback divider */
914 	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
915 	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
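	/* e.g. (hypothetical values) nom/den = 65/27 with post_div = 8:
	 * ref_div_max is capped at 100 / 8 = 12 above (assuming the PLL
	 * limits allow it), so ref_div becomes DIV_ROUND_CLOSEST(27, 8) = 3
	 * and fb_div becomes DIV_ROUND_CLOSEST(65 * 3 * 8, 27) = 58.
	 */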
916 
917 	/* limit fb divider to its maximum */
918 	if (*fb_div > fb_div_max) {
919 		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
920 		*fb_div = fb_div_max;
921 	}
922 }
923 
924 /**
925  * radeon_compute_pll_avivo - compute PLL parameters
926  *
927  * @pll: information about the PLL
928  * @dot_clock_p: resulting pixel clock
929  * @fb_div_p: resulting feedback divider
930  * @frac_fb_div_p: fractional part of the feedback divider
931  * @ref_div_p: resulting reference divider
932  * @post_div_p: resulting post divider
933  *
934  * Try to calculate the PLL parameters to generate the given frequency:
935  * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
936  */
937 void radeon_compute_pll_avivo(struct radeon_pll *pll,
938 			      u32 freq,
939 			      u32 *dot_clock_p,
940 			      u32 *fb_div_p,
941 			      u32 *frac_fb_div_p,
942 			      u32 *ref_div_p,
943 			      u32 *post_div_p)
944 {
945 	unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
946 		freq : freq / 10;
947 
948 	unsigned fb_div_min, fb_div_max, fb_div;
949 	unsigned post_div_min, post_div_max, post_div;
950 	unsigned ref_div_min, ref_div_max, ref_div;
951 	unsigned post_div_best, diff_best;
952 	unsigned nom, den;
953 
954 	/* determine allowed feedback divider range */
955 	fb_div_min = pll->min_feedback_div;
956 	fb_div_max = pll->max_feedback_div;
957 
958 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
959 		fb_div_min *= 10;
960 		fb_div_max *= 10;
961 	}
962 
963 	/* determine allowed ref divider range */
964 	if (pll->flags & RADEON_PLL_USE_REF_DIV)
965 		ref_div_min = pll->reference_div;
966 	else
967 		ref_div_min = pll->min_ref_div;
968 
969 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
970 	    pll->flags & RADEON_PLL_USE_REF_DIV)
971 		ref_div_max = pll->reference_div;
972 	else
973 		ref_div_max = pll->max_ref_div;
974 
975 	/* determine allowed post divider range */
976 	if (pll->flags & RADEON_PLL_USE_POST_DIV) {
977 		post_div_min = pll->post_div;
978 		post_div_max = pll->post_div;
979 	} else {
980 		unsigned vco_min, vco_max;
981 
982 		if (pll->flags & RADEON_PLL_IS_LCD) {
983 			vco_min = pll->lcd_pll_out_min;
984 			vco_max = pll->lcd_pll_out_max;
985 		} else {
986 			vco_min = pll->pll_out_min;
987 			vco_max = pll->pll_out_max;
988 		}
989 
990 		if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
991 			vco_min *= 10;
992 			vco_max *= 10;
993 		}
994 
995 		post_div_min = vco_min / target_clock;
996 		if ((target_clock * post_div_min) < vco_min)
997 			++post_div_min;
998 		if (post_div_min < pll->min_post_div)
999 			post_div_min = pll->min_post_div;
1000 
1001 		post_div_max = vco_max / target_clock;
1002 		if ((target_clock * post_div_max) > vco_max)
1003 			--post_div_max;
1004 		if (post_div_max > pll->max_post_div)
1005 			post_div_max = pll->max_post_div;
1006 	}
1007 
1008 	/* represent the searched ratio as fractional number */
1009 	nom = target_clock;
1010 	den = pll->reference_freq;
1011 
1012 	/* reduce the numbers to a simpler ratio */
1013 	avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);
1014 
1015 	/* now search for a post divider */
1016 	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
1017 		post_div_best = post_div_min;
1018 	else
1019 		post_div_best = post_div_max;
1020 	diff_best = ~0;
1021 
1022 	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
1023 		unsigned diff;
1024 		avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
1025 				     ref_div_max, &fb_div, &ref_div);
1026 		diff = abs(target_clock - (pll->reference_freq * fb_div) /
1027 			(ref_div * post_div));
1028 
1029 		if (diff < diff_best || (diff == diff_best &&
1030 		    !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {
1031 
1032 			post_div_best = post_div;
1033 			diff_best = diff;
1034 		}
1035 	}
1036 	post_div = post_div_best;
1037 
1038 	/* get the feedback and reference divider for the optimal value */
1039 	avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
1040 			     &fb_div, &ref_div);
1041 
1042 	/* reduce the numbers to a simpler ratio once more */
1043 	/* this also makes sure that the reference divider is large enough */
1044 	avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
1045 
1046 	/* avoid high jitter with small fractional dividers */
1047 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
1048 		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
1049 		if (fb_div < fb_div_min) {
1050 			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
1051 			fb_div *= tmp;
1052 			ref_div *= tmp;
1053 		}
1054 	}
1055 
1056 	/* and finally save the result */
1057 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
1058 		*fb_div_p = fb_div / 10;
1059 		*frac_fb_div_p = fb_div % 10;
1060 	} else {
1061 		*fb_div_p = fb_div;
1062 		*frac_fb_div_p = 0;
1063 	}
1064 
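	/* The expression below is simply
	 * dot_clock = ref_freq * (fb_div + frac_fb_div / 10) / (ref_div * post_div)
	 * rearranged so that everything stays in integer arithmetic.
	 */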
1065 	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
1066 			(pll->reference_freq * *frac_fb_div_p)) /
1067 		       (ref_div * post_div * 10);
1068 	*ref_div_p = ref_div;
1069 	*post_div_p = post_div;
1070 
1071 	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
1072 		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
1073 		      ref_div, post_div);
1074 }
1075 
1076 /* pre-avivo */
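/*
 * Divide n by d, rounding to the nearest integer, e.g. radeon_div(10, 4) = 3.
 * do_div() updates n to the quotient in place and returns the remainder,
 * which is why the (unused) remainder lands in mod and n is returned.
 */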
1077 static inline uint32_t radeon_div(uint64_t n, uint32_t d)
1078 {
1079 	uint64_t mod;
1080 
1081 	n += d / 2;
1082 
1083 	mod = do_div(n, d);
1084 	return n;
1085 }
1086 
1087 void radeon_compute_pll_legacy(struct radeon_pll *pll,
1088 			       uint64_t freq,
1089 			       uint32_t *dot_clock_p,
1090 			       uint32_t *fb_div_p,
1091 			       uint32_t *frac_fb_div_p,
1092 			       uint32_t *ref_div_p,
1093 			       uint32_t *post_div_p)
1094 {
1095 	uint32_t min_ref_div = pll->min_ref_div;
1096 	uint32_t max_ref_div = pll->max_ref_div;
1097 	uint32_t min_post_div = pll->min_post_div;
1098 	uint32_t max_post_div = pll->max_post_div;
1099 	uint32_t min_fractional_feed_div = 0;
1100 	uint32_t max_fractional_feed_div = 0;
1101 	uint32_t best_vco = pll->best_vco;
1102 	uint32_t best_post_div = 1;
1103 	uint32_t best_ref_div = 1;
1104 	uint32_t best_feedback_div = 1;
1105 	uint32_t best_frac_feedback_div = 0;
1106 	uint32_t best_freq = -1;
1107 	uint32_t best_error = 0xffffffff;
1108 	uint32_t best_vco_diff = 1;
1109 	uint32_t post_div;
1110 	u32 pll_out_min, pll_out_max;
1111 
1112 	DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
1113 	freq = freq * 1000;
1114 
1115 	if (pll->flags & RADEON_PLL_IS_LCD) {
1116 		pll_out_min = pll->lcd_pll_out_min;
1117 		pll_out_max = pll->lcd_pll_out_max;
1118 	} else {
1119 		pll_out_min = pll->pll_out_min;
1120 		pll_out_max = pll->pll_out_max;
1121 	}
1122 
1123 	if (pll_out_min > 64800)
1124 		pll_out_min = 64800;
1125 
1126 	if (pll->flags & RADEON_PLL_USE_REF_DIV)
1127 		min_ref_div = max_ref_div = pll->reference_div;
1128 	else {
1129 		while (min_ref_div < max_ref_div-1) {
1130 			uint32_t mid = (min_ref_div + max_ref_div) / 2;
1131 			uint32_t pll_in = pll->reference_freq / mid;
1132 			if (pll_in < pll->pll_in_min)
1133 				max_ref_div = mid;
1134 			else if (pll_in > pll->pll_in_max)
1135 				min_ref_div = mid;
1136 			else
1137 				break;
1138 		}
1139 	}
1140 
1141 	if (pll->flags & RADEON_PLL_USE_POST_DIV)
1142 		min_post_div = max_post_div = pll->post_div;
1143 
1144 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
1145 		min_fractional_feed_div = pll->min_frac_feedback_div;
1146 		max_fractional_feed_div = pll->max_frac_feedback_div;
1147 	}
1148 
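	/*
	 * Brute-force search: walk every allowed post and reference divider
	 * and, for each pair, binary-search the feedback divider (and then
	 * its fractional part) so the VCO stays within [pll_out_min,
	 * pll_out_max]; the candidate with the smallest frequency error wins,
	 * with the RADEON_PLL_PREFER_* flags breaking ties.
	 */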
1149 	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
1150 		uint32_t ref_div;
1151 
1152 		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
1153 			continue;
1154 
1155 		/* legacy radeons only have a few post_divs */
1156 		if (pll->flags & RADEON_PLL_LEGACY) {
1157 			if ((post_div == 5) ||
1158 			    (post_div == 7) ||
1159 			    (post_div == 9) ||
1160 			    (post_div == 10) ||
1161 			    (post_div == 11) ||
1162 			    (post_div == 13) ||
1163 			    (post_div == 14) ||
1164 			    (post_div == 15))
1165 				continue;
1166 		}
1167 
1168 		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
1169 			uint32_t feedback_div, current_freq = 0, error, vco_diff;
1170 			uint32_t pll_in = pll->reference_freq / ref_div;
1171 			uint32_t min_feed_div = pll->min_feedback_div;
1172 			uint32_t max_feed_div = pll->max_feedback_div + 1;
1173 
1174 			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
1175 				continue;
1176 
1177 			while (min_feed_div < max_feed_div) {
1178 				uint32_t vco;
1179 				uint32_t min_frac_feed_div = min_fractional_feed_div;
1180 				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
1181 				uint32_t frac_feedback_div;
1182 				uint64_t tmp;
1183 
1184 				feedback_div = (min_feed_div + max_feed_div) / 2;
1185 
1186 				tmp = (uint64_t)pll->reference_freq * feedback_div;
1187 				vco = radeon_div(tmp, ref_div);
1188 
1189 				if (vco < pll_out_min) {
1190 					min_feed_div = feedback_div + 1;
1191 					continue;
1192 				} else if (vco > pll_out_max) {
1193 					max_feed_div = feedback_div;
1194 					continue;
1195 				}
1196 
1197 				while (min_frac_feed_div < max_frac_feed_div) {
1198 					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
1199 					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
1200 					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
1201 					current_freq = radeon_div(tmp, ref_div * post_div);
1202 
1203 					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
1204 						if (freq < current_freq)
1205 							error = 0xffffffff;
1206 						else
1207 							error = freq - current_freq;
1208 					} else
1209 						error = abs(current_freq - freq);
1210 					vco_diff = abs(vco - best_vco);
1211 
1212 					if ((best_vco == 0 && error < best_error) ||
1213 					    (best_vco != 0 &&
1214 					     ((best_error > 100 && error < best_error - 100) ||
1215 					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
1216 						best_post_div = post_div;
1217 						best_ref_div = ref_div;
1218 						best_feedback_div = feedback_div;
1219 						best_frac_feedback_div = frac_feedback_div;
1220 						best_freq = current_freq;
1221 						best_error = error;
1222 						best_vco_diff = vco_diff;
1223 					} else if (current_freq == freq) {
1224 						if (best_freq == -1) {
1225 							best_post_div = post_div;
1226 							best_ref_div = ref_div;
1227 							best_feedback_div = feedback_div;
1228 							best_frac_feedback_div = frac_feedback_div;
1229 							best_freq = current_freq;
1230 							best_error = error;
1231 							best_vco_diff = vco_diff;
1232 						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
1233 							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
1234 							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
1235 							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
1236 							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
1237 							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
1238 							best_post_div = post_div;
1239 							best_ref_div = ref_div;
1240 							best_feedback_div = feedback_div;
1241 							best_frac_feedback_div = frac_feedback_div;
1242 							best_freq = current_freq;
1243 							best_error = error;
1244 							best_vco_diff = vco_diff;
1245 						}
1246 					}
1247 					if (current_freq < freq)
1248 						min_frac_feed_div = frac_feedback_div + 1;
1249 					else
1250 						max_frac_feed_div = frac_feedback_div;
1251 				}
1252 				if (current_freq < freq)
1253 					min_feed_div = feedback_div + 1;
1254 				else
1255 					max_feed_div = feedback_div;
1256 			}
1257 		}
1258 	}
1259 
1260 	*dot_clock_p = best_freq / 10000;
1261 	*fb_div_p = best_feedback_div;
1262 	*frac_fb_div_p = best_frac_feedback_div;
1263 	*ref_div_p = best_ref_div;
1264 	*post_div_p = best_post_div;
1265 	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
1266 		      (long long)freq,
1267 		      best_freq / 1000, best_feedback_div, best_frac_feedback_div,
1268 		      best_ref_div, best_post_div);
1269 
1270 }
1271 
1272 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
1273 {
1274 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
1275 
1276 	if (radeon_fb->obj) {
1277 		drm_gem_object_unreference_unlocked(radeon_fb->obj);
1278 	}
1279 	drm_framebuffer_cleanup(fb);
1280 	kfree(radeon_fb);
1281 }
1282 
1283 static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
1284 						  struct drm_file *file_priv,
1285 						  unsigned int *handle)
1286 {
1287 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
1288 
1289 	return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
1290 }
1291 
1292 static const struct drm_framebuffer_funcs radeon_fb_funcs = {
1293 	.destroy = radeon_user_framebuffer_destroy,
1294 	.create_handle = radeon_user_framebuffer_create_handle,
1295 };
1296 
1297 int
1298 radeon_framebuffer_init(struct drm_device *dev,
1299 			struct radeon_framebuffer *rfb,
1300 			struct drm_mode_fb_cmd2 *mode_cmd,
1301 			struct drm_gem_object *obj)
1302 {
1303 	int ret;
1304 	rfb->obj = obj;
1305 	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
1306 	ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
1307 	if (ret) {
1308 		rfb->obj = NULL;
1309 		return ret;
1310 	}
1311 	return 0;
1312 }
1313 
1314 static struct drm_framebuffer *
1315 radeon_user_framebuffer_create(struct drm_device *dev,
1316 			       struct drm_file *file_priv,
1317 			       struct drm_mode_fb_cmd2 *mode_cmd)
1318 {
1319 	struct drm_gem_object *obj;
1320 	struct radeon_framebuffer *radeon_fb;
1321 	int ret;
1322 
1323 	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
1324 	if (obj == NULL) {
1325 		dev_err(&dev->pdev->dev, "No GEM object associated with handle 0x%08X, "
1326 			"can't create framebuffer\n", mode_cmd->handles[0]);
1327 		return ERR_PTR(-ENOENT);
1328 	}
1329 
1330 	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
1331 	if (radeon_fb == NULL) {
1332 		drm_gem_object_unreference_unlocked(obj);
1333 		return ERR_PTR(-ENOMEM);
1334 	}
1335 
1336 	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
1337 	if (ret) {
1338 		kfree(radeon_fb);
1339 		drm_gem_object_unreference_unlocked(obj);
1340 		return ERR_PTR(ret);
1341 	}
1342 
1343 	return &radeon_fb->base;
1344 }
1345 
1346 static void radeon_output_poll_changed(struct drm_device *dev)
1347 {
1348 	struct radeon_device *rdev = dev->dev_private;
1349 	radeon_fb_output_poll_changed(rdev);
1350 }
1351 
1352 static const struct drm_mode_config_funcs radeon_mode_funcs = {
1353 	.fb_create = radeon_user_framebuffer_create,
1354 	.output_poll_changed = radeon_output_poll_changed
1355 };
1356 
1357 static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
1358 {	{ 0, "driver" },
1359 	{ 1, "bios" },
1360 };
1361 
1362 static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
1363 {	{ TV_STD_NTSC, "ntsc" },
1364 	{ TV_STD_PAL, "pal" },
1365 	{ TV_STD_PAL_M, "pal-m" },
1366 	{ TV_STD_PAL_60, "pal-60" },
1367 	{ TV_STD_NTSC_J, "ntsc-j" },
1368 	{ TV_STD_SCART_PAL, "scart-pal" },
1369 	{ TV_STD_PAL_CN, "pal-cn" },
1370 	{ TV_STD_SECAM, "secam" },
1371 };
1372 
1373 static struct drm_prop_enum_list radeon_underscan_enum_list[] =
1374 {	{ UNDERSCAN_OFF, "off" },
1375 	{ UNDERSCAN_ON, "on" },
1376 	{ UNDERSCAN_AUTO, "auto" },
1377 };
1378 
1379 static struct drm_prop_enum_list radeon_audio_enum_list[] =
1380 {	{ RADEON_AUDIO_DISABLE, "off" },
1381 	{ RADEON_AUDIO_ENABLE, "on" },
1382 	{ RADEON_AUDIO_AUTO, "auto" },
1383 };
1384 
1385 /* XXX support different dither options? spatial, temporal, both, etc. */
1386 static struct drm_prop_enum_list radeon_dither_enum_list[] =
1387 {	{ RADEON_FMT_DITHER_DISABLE, "off" },
1388 	{ RADEON_FMT_DITHER_ENABLE, "on" },
1389 };
1390 
1391 static int radeon_modeset_create_props(struct radeon_device *rdev)
1392 {
1393 	int sz;
1394 
1395 	if (rdev->is_atom_bios) {
1396 		rdev->mode_info.coherent_mode_property =
1397 			drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
1398 		if (!rdev->mode_info.coherent_mode_property)
1399 			return -ENOMEM;
1400 	}
1401 
1402 	if (!ASIC_IS_AVIVO(rdev)) {
1403 		sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
1404 		rdev->mode_info.tmds_pll_property =
1405 			drm_property_create_enum(rdev->ddev, 0,
1406 					    "tmds_pll",
1407 					    radeon_tmds_pll_enum_list, sz);
1408 	}
1409 
1410 	rdev->mode_info.load_detect_property =
1411 		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
1412 	if (!rdev->mode_info.load_detect_property)
1413 		return -ENOMEM;
1414 
1415 	drm_mode_create_scaling_mode_property(rdev->ddev);
1416 
1417 	sz = ARRAY_SIZE(radeon_tv_std_enum_list);
1418 	rdev->mode_info.tv_std_property =
1419 		drm_property_create_enum(rdev->ddev, 0,
1420 				    "tv standard",
1421 				    radeon_tv_std_enum_list, sz);
1422 
1423 	sz = ARRAY_SIZE(radeon_underscan_enum_list);
1424 	rdev->mode_info.underscan_property =
1425 		drm_property_create_enum(rdev->ddev, 0,
1426 				    "underscan",
1427 				    radeon_underscan_enum_list, sz);
1428 
1429 	rdev->mode_info.underscan_hborder_property =
1430 		drm_property_create_range(rdev->ddev, 0,
1431 					"underscan hborder", 0, 128);
1432 	if (!rdev->mode_info.underscan_hborder_property)
1433 		return -ENOMEM;
1434 
1435 	rdev->mode_info.underscan_vborder_property =
1436 		drm_property_create_range(rdev->ddev, 0,
1437 					"underscan vborder", 0, 128);
1438 	if (!rdev->mode_info.underscan_vborder_property)
1439 		return -ENOMEM;
1440 
1441 	sz = ARRAY_SIZE(radeon_audio_enum_list);
1442 	rdev->mode_info.audio_property =
1443 		drm_property_create_enum(rdev->ddev, 0,
1444 					 "audio",
1445 					 radeon_audio_enum_list, sz);
1446 
1447 	sz = ARRAY_SIZE(radeon_dither_enum_list);
1448 	rdev->mode_info.dither_property =
1449 		drm_property_create_enum(rdev->ddev, 0,
1450 					 "dither",
1451 					 radeon_dither_enum_list, sz);
1452 
1453 	return 0;
1454 }
1455 
1456 void radeon_update_display_priority(struct radeon_device *rdev)
1457 {
1458 	/* adjustment options for the display watermarks */
1459 	if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
1460 		/* set display priority to high for r3xx, rv515 chips
1461 		 * this avoids flickering due to underflow to the
1462 		 * display controllers during heavy acceleration.
1463 		 * Don't force high on rs4xx igp chips as it seems to
1464 		 * affect the sound card.  See kernel bug 15982.
1465 		 */
1466 		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
1467 		    !(rdev->flags & RADEON_IS_IGP))
1468 			rdev->disp_priority = 2;
1469 		else
1470 			rdev->disp_priority = 0;
1471 	} else
1472 		rdev->disp_priority = radeon_disp_priority;
1473 
1474 }
1475 
1476 /*
1477  * Allocate hdmi structs and determine register offsets
1478  */
1479 static void radeon_afmt_init(struct radeon_device *rdev)
1480 {
1481 	int i;
1482 
1483 	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
1484 		rdev->mode_info.afmt[i] = NULL;
1485 
1486 	if (ASIC_IS_NODCE(rdev)) {
1487 		/* nothing to do */
1488 	} else if (ASIC_IS_DCE4(rdev)) {
1489 		static uint32_t eg_offsets[] = {
1490 			EVERGREEN_CRTC0_REGISTER_OFFSET,
1491 			EVERGREEN_CRTC1_REGISTER_OFFSET,
1492 			EVERGREEN_CRTC2_REGISTER_OFFSET,
1493 			EVERGREEN_CRTC3_REGISTER_OFFSET,
1494 			EVERGREEN_CRTC4_REGISTER_OFFSET,
1495 			EVERGREEN_CRTC5_REGISTER_OFFSET,
1496 			0x13830 - 0x7030,
1497 		};
1498 		int num_afmt;
1499 
1500 		/* DCE8 has 7 audio blocks tied to DIG encoders */
1501 		/* DCE6 has 6 audio blocks tied to DIG encoders */
1502 		/* DCE4/5 has 6 audio blocks tied to DIG encoders */
1503 		/* DCE4.1 has 2 audio blocks tied to DIG encoders */
1504 		if (ASIC_IS_DCE8(rdev))
1505 			num_afmt = 7;
1506 		else if (ASIC_IS_DCE6(rdev))
1507 			num_afmt = 6;
1508 		else if (ASIC_IS_DCE5(rdev))
1509 			num_afmt = 6;
1510 		else if (ASIC_IS_DCE41(rdev))
1511 			num_afmt = 2;
1512 		else /* DCE4 */
1513 			num_afmt = 6;
1514 
1515 		BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
1516 		for (i = 0; i < num_afmt; i++) {
1517 			rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1518 			if (rdev->mode_info.afmt[i]) {
1519 				rdev->mode_info.afmt[i]->offset = eg_offsets[i];
1520 				rdev->mode_info.afmt[i]->id = i;
1521 			}
1522 		}
1523 	} else if (ASIC_IS_DCE3(rdev)) {
1524 		/* DCE3.x has 2 audio blocks tied to DIG encoders */
1525 		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1526 		if (rdev->mode_info.afmt[0]) {
1527 			rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
1528 			rdev->mode_info.afmt[0]->id = 0;
1529 		}
1530 		rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1531 		if (rdev->mode_info.afmt[1]) {
1532 			rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
1533 			rdev->mode_info.afmt[1]->id = 1;
1534 		}
1535 	} else if (ASIC_IS_DCE2(rdev)) {
1536 		/* DCE2 has at least 1 routable audio block */
1537 		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1538 		if (rdev->mode_info.afmt[0]) {
1539 			rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
1540 			rdev->mode_info.afmt[0]->id = 0;
1541 		}
1542 		/* r6xx has 2 routable audio blocks */
1543 		if (rdev->family >= CHIP_R600) {
1544 			rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1545 			if (rdev->mode_info.afmt[1]) {
1546 				rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
1547 				rdev->mode_info.afmt[1]->id = 1;
1548 			}
1549 		}
1550 	}
1551 }
1552 
1553 static void radeon_afmt_fini(struct radeon_device *rdev)
1554 {
1555 	int i;
1556 
1557 	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
1558 		kfree(rdev->mode_info.afmt[i]);
1559 		rdev->mode_info.afmt[i] = NULL;
1560 	}
1561 }
1562 
1563 int radeon_modeset_init(struct radeon_device *rdev)
1564 {
1565 	int i;
1566 	int ret;
1567 
1568 	drm_mode_config_init(rdev->ddev);
1569 	rdev->mode_info.mode_config_initialized = true;
1570 
1571 	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
1572 
	if (ASIC_IS_DCE5(rdev)) {
		rdev->ddev->mode_config.max_width = 16384;
		rdev->ddev->mode_config.max_height = 16384;
	} else if (ASIC_IS_AVIVO(rdev)) {
		rdev->ddev->mode_config.max_width = 8192;
		rdev->ddev->mode_config.max_height = 8192;
	} else {
		rdev->ddev->mode_config.max_width = 4096;
		rdev->ddev->mode_config.max_height = 4096;
	}

	rdev->ddev->mode_config.preferred_depth = 24;
	rdev->ddev->mode_config.prefer_shadow = 1;

	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;

	ret = radeon_modeset_create_props(rdev);
	if (ret) {
		return ret;
	}

	/* init i2c buses */
	radeon_i2c_init(rdev);

	/* check combios for a valid hardcoded EDID - Sun servers */
	if (!rdev->is_atom_bios) {
		/* check for hardcoded EDID in BIOS */
		radeon_combios_check_hardcoded_edid(rdev);
	}

	/* allocate crtcs */
	for (i = 0; i < rdev->num_crtc; i++) {
		radeon_crtc_init(rdev->ddev, i);
	}

	/* okay we should have all the bios connectors */
	ret = radeon_setup_enc_conn(rdev->ddev);
	if (!ret) {
		return ret;
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}

	/* initialize hpd */
	radeon_hpd_init(rdev);

	/* setup afmt */
	radeon_afmt_init(rdev);

	radeon_fbdev_init(rdev);
	drm_kms_helper_poll_init(rdev->ddev);

	if (rdev->pm.dpm_enabled) {
		/* do dpm late init */
		ret = radeon_pm_late_init(rdev);
		if (ret) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
		/* set the dpm state for PX since there won't be
		 * a modeset to call this.
		 */
		radeon_pm_compute_clocks(rdev);
	}

	return 0;
}

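/*
 * radeon_modeset_fini - tear down the state created by
 * radeon_modeset_init(), in roughly the reverse order: fbdev, audio
 * blocks, output polling, hotplug detection, mode config and i2c buses.
 */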
void radeon_modeset_fini(struct radeon_device *rdev)
{
	radeon_fbdev_fini(rdev);
	kfree(rdev->mode_info.bios_hardcoded_edid);

	if (rdev->mode_info.mode_config_initialized) {
		radeon_afmt_fini(rdev);
		drm_kms_helper_poll_fini(rdev->ddev);
		radeon_hpd_fini(rdev);
		drm_mode_config_cleanup(rdev->ddev);
		rdev->mode_info.mode_config_initialized = false;
	}
	/* free i2c buses */
	radeon_i2c_fini(rdev);
}

static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try to guess whether this is a TV or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

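/*
 * radeon_crtc_scaling_mode_fixup - choose the RMX (scaler) mode and the
 * horizontal/vertical scaling ratios for this crtc based on the scaling
 * and underscan settings of the encoders driving it.  Returns false if
 * the attached encoders request inconsistent scaling.
 */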
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_encoder *radeon_encoder;
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	bool first = true;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	radeon_crtc->h_border = 0;
	radeon_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		radeon_encoder = to_radeon_encoder(encoder);
		connector = radeon_get_connector_for_encoder(encoder);
		radeon_connector = to_radeon_connector(connector);

		if (first) {
			/* set scaling */
			if (radeon_encoder->rmx_type == RMX_OFF)
				radeon_crtc->rmx_type = RMX_OFF;
			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
			else
				radeon_crtc->rmx_type = RMX_OFF;
			/* copy native mode */
			memcpy(&radeon_crtc->native_mode,
			       &radeon_encoder->native_mode,
				sizeof(struct drm_display_mode));
			src_v = crtc->mode.vdisplay;
			dst_v = radeon_crtc->native_mode.vdisplay;
			src_h = crtc->mode.hdisplay;
			dst_h = radeon_crtc->native_mode.hdisplay;

			/* fix up for overscan on hdmi */
			if (ASIC_IS_AVIVO(rdev) &&
			    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
			      drm_detect_hdmi_monitor(radeon_connector->edid) &&
			      is_hdtv_mode(mode)))) {
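				/* Default underscan borders: 1/32 of the
				 * active width/height plus 16 pixels on
				 * each side, unless the encoder specifies
				 * explicit underscan borders.
				 */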
				if (radeon_encoder->underscan_hborder != 0)
					radeon_crtc->h_border = radeon_encoder->underscan_hborder;
				else
					radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
				if (radeon_encoder->underscan_vborder != 0)
					radeon_crtc->v_border = radeon_encoder->underscan_vborder;
				else
					radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
				radeon_crtc->rmx_type = RMX_FULL;
				src_v = crtc->mode.vdisplay;
				dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
				src_h = crtc->mode.hdisplay;
				dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
			}
			first = false;
		} else {
			if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
				/* WARNING: Right now this can't happen, but
				 * in the future we need to check that the
				 * scaling is consistent across the different
				 * encoders (i.e. all encoders driving this
				 * crtc must be able to use the same scaling).
				 */
				DRM_ERROR("Scaling not consistent across encoders.\n");
				return false;
			}
		}
	}
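	/* Compute the scaling ratios as source/destination in 20.12
	 * fixed point; e.g. a 1280x720 mode scaled to a 1920x1080 native
	 * panel gives hsc = 1280/1920 and vsc = 720/1080 (both ~0.667).
	 */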
	if (radeon_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;
		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		radeon_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		radeon_crtc->hsc.full = dfixed_div(a, b);
	} else {
		radeon_crtc->vsc.full = dfixed_const(1);
		radeon_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve the current video scanout position of a crtc on a given gpu, and
 * an optional accurate timestamp of when the query happened.
 *
 * \param dev Device to query.
 * \param crtc Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_INVBL = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 *
 */
int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
			       int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct radeon_device *rdev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

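	/* Read the vblank start/end and current scanout position for the
	 * requested crtc; the register layout differs per display block
	 * generation (DCE4+, AVIVO, pre-AVIVO).
	 */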
	if (ASIC_IS_DCE4(rdev)) {
		if (crtc == 0) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC0_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC0_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC1_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC1_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 2) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC2_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC2_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 3) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC3_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC3_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 4) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC4_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC4_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 5) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC5_REGISTER_OFFSET);
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC5_REGISTER_OFFSET);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		if (crtc == 0) {
			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
			ret |= DRM_SCANOUTPOS_VALID;
		}
	} else {
		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
		if (crtc == 0) {
			/* Assume vbl_end == 0, get vbl_start from
			 * upper 16 bits.
			 */
			vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
			position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
		if (crtc == 1) {
			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
			position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;

			ret |= DRM_SCANOUTPOS_VALID;
		}
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Did we get valid vblank area boundaries from the gpu? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	}
	else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
		vbl_end = 0;
	}

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	/* Is vpos outside nominal vblank area, but less than
	 * 1/100 of a frame height away from start of vblank?
	 * If so, assume this isn't a massively delayed vblank
	 * interrupt, but a vblank interrupt that fired a few
	 * microseconds before true start of vblank. Compensate
	 * by adding a full frame duration to the final timestamp.
	 * Happens, e.g., on ATI R500, R600.
	 *
	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
	 */
	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;

		if (vbl_start - *vpos < vtotal / 100) {
			*vpos -= vtotal;

			/* Signal this correction as "applied". */
			ret |= 0x8;
		}
	}

	return ret;
}