/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

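/* Polling timeout, in ms, used by the forcewake ack waits below. */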
#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
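
/*
 * Note: the __raw_* accessors above map straight onto read{b,w,l,q}()/
 * write{b,w,l,q}() against the MMIO BAR and bypass the locking, tracing,
 * forcewake and FIFO accounting applied by the register helpers further
 * down; they are used by the forcewake/uncore machinery in this file.
 * __raw_posting_read() performs a throwaway 32-bit read, which flushes
 * previously posted writes.
 */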


static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
	     "MMIO read or write has been dropped %x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

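/*
 * Writes to registers behind forcewake go through a small hardware FIFO.
 * dev_priv->uncore.fifo_count caches how many free entries we believe are
 * left; each write consumes one.  Once the cached count drops below
 * GT_FIFO_NUM_RESERVED_ENTRIES, re-read GT_FIFO_FREE_ENTRIES and poll
 * (500 iterations of 10us) until the hardware again reports more free
 * slots than the reserved minimum, returning non-zero on timeout.
 */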
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

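/*
 * Deferred forcewake release: gen6_gt_force_wake_put() below does not drop
 * the last reference synchronously.  Instead it keeps the count at 1 and
 * schedules this work one jiffy later, which avoids bouncing the hardware
 * in and out of forcewake on back-to-back get/put cycles.
 */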
static void gen6_force_wake_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if (IS_HASWELL(dev) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	intel_uncore_forcewake_reset(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg_val;

	intel_uncore_forcewake_reset(dev);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);

	/* Turn off power gating; needed especially on BIOS-less systems */
	if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);

		if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
			vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);

		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
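/*
 * Illustrative only (not a real call site in this file): a sequence that
 * must keep the GT awake across several accesses would look like
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	... multiple I915_READ()/I915_WRITE() accesses ...
 *	gen6_gt_force_wake_put(dev_priv);
 */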
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->uncore.force_wake_work,
				 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)
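/*
 * i.e. everything below offset 0x40000, other than the FORCEWAKE register
 * itself, is routed through the forcewake/FIFO-aware paths; higher offsets
 * are accessed directly.
 */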

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

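/*
 * On gen6+, a read of a register behind forcewake must make sure the GT is
 * actually awake: if no forcewake reference is currently held, grab one
 * just for the duration of the read and drop it again afterwards.  The
 * uncore spinlock (taken in REG_READ_HEADER) keeps the reference count and
 * the raw access consistent with respect to other register accesses.
 */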
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

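/*
 * On gen6/gen7, writes to registers behind forcewake do not take an
 * explicit forcewake reference; instead they reserve a slot in the GT
 * write FIFO via __gen6_gt_wait_for_fifo().  If that reservation timed
 * out, GTFIFODBG is checked afterwards to report any dropped access.
 */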
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

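/*
 * Writes to the registers listed below are treated as "shadowed" on gen8
 * and are issued without taking a forcewake reference; everything else
 * gets an explicit forcewake get/put around the write (see __gen8_write
 * below).
 */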
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (__needs_put) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (__needs_put) { \
		dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_HEADER

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
			  gen6_force_wake_work);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		dev_priv->uncore.funcs.mmio_writeb  = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
		}
		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->uncore.force_wake_work);

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
}

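/*
 * Registers userspace may read through the i915_reg_read ioctl.  An entry
 * applies only to the generations whose bit is set in gen_bitmask, tested
 * as (1 << INTEL_INFO(dev)->gen) & gen_bitmask in the lookup loop below.
 */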
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* supported gens: 1 << gen, e.g. 0x10 for gen4, 0x30 for gen4+5 */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	unsigned long irqflags;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->uncore.forcewake_count)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	else
		dev_priv->uncore.funcs.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4: return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_clear_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* XXX needs spinlock around caller's grouping */
	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}