1 /*
2  * Copyright © 2013 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "i915_drv.h"
25 #include "intel_drv.h"
26 #include "i915_vgpu.h"
27 
28 #include <asm/iosf_mbi.h>
29 #include <linux/pm_runtime.h>
30 
31 #define FORCEWAKE_ACK_TIMEOUT_MS 50
32 
33 #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
34 
35 static const char * const forcewake_domain_names[] = {
36 	"render",
37 	"blitter",
38 	"media",
39 };
40 
41 const char *
42 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
43 {
44 	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
45 
46 	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
47 		return forcewake_domain_names[id];
48 
49 	WARN_ON(id);
50 
51 	return "unknown";
52 }
53 
54 static inline void
55 fw_domain_reset(struct drm_i915_private *i915,
56 		const struct intel_uncore_forcewake_domain *d)
57 {
58 	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
59 }
60 
61 static inline void
62 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
63 {
64 	d->wake_count++;
65 	hrtimer_start_range_ns(&d->timer,
66 			       NSEC_PER_MSEC,
67 			       NSEC_PER_MSEC,
68 			       HRTIMER_MODE_REL);
69 }
70 
71 static inline void
72 fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
73 			 const struct intel_uncore_forcewake_domain *d)
74 {
75 	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
76 			     FORCEWAKE_KERNEL) == 0,
77 			    FORCEWAKE_ACK_TIMEOUT_MS))
78 		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
79 			  intel_uncore_forcewake_domain_to_str(d->id));
80 }
81 
82 static inline void
83 fw_domain_get(struct drm_i915_private *i915,
84 	      const struct intel_uncore_forcewake_domain *d)
85 {
86 	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
87 }
88 
89 static inline void
90 fw_domain_wait_ack(const struct drm_i915_private *i915,
91 		   const struct intel_uncore_forcewake_domain *d)
92 {
93 	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
94 			     FORCEWAKE_KERNEL),
95 			    FORCEWAKE_ACK_TIMEOUT_MS))
96 		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
97 			  intel_uncore_forcewake_domain_to_str(d->id));
98 }
99 
100 static inline void
101 fw_domain_put(const struct drm_i915_private *i915,
102 	      const struct intel_uncore_forcewake_domain *d)
103 {
104 	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
105 }
106 
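/*
 * Take the requested domains in two passes: first kick off a wake request on
 * each domain (once any previous ack has cleared), then wait for all of the
 * acks, so the individual wake latencies overlap instead of accumulating.
 */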
107 static void
108 fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
109 {
110 	struct intel_uncore_forcewake_domain *d;
111 	unsigned int tmp;
112 
113 	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
114 
115 	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
116 		fw_domain_wait_ack_clear(i915, d);
117 		fw_domain_get(i915, d);
118 	}
119 
120 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
121 		fw_domain_wait_ack(i915, d);
122 
123 	i915->uncore.fw_domains_active |= fw_domains;
124 }
125 
126 static void
127 fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
128 {
129 	struct intel_uncore_forcewake_domain *d;
130 	unsigned int tmp;
131 
132 	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
133 
134 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
135 		fw_domain_put(i915, d);
136 
137 	i915->uncore.fw_domains_active &= ~fw_domains;
138 }
139 
140 static void
141 fw_domains_reset(struct drm_i915_private *i915,
142 		 enum forcewake_domains fw_domains)
143 {
144 	struct intel_uncore_forcewake_domain *d;
145 	unsigned int tmp;
146 
147 	if (!fw_domains)
148 		return;
149 
150 	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
151 
152 	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
153 		fw_domain_reset(i915, d);
154 }
155 
156 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
157 {
158 	/* w/a for a sporadic read returning 0 by waiting for the GT
159 	 * thread to wake up.
160 	 */
161 	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
162 				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
163 		DRM_ERROR("GT thread status wait timed out\n");
164 }
165 
166 static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
167 					      enum forcewake_domains fw_domains)
168 {
169 	fw_domains_get(dev_priv, fw_domains);
170 
171 	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
172 	__gen6_gt_wait_for_thread_c0(dev_priv);
173 }
174 
175 static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
176 {
177 	u32 gtfifodbg;
178 
179 	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
180 	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
181 		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
182 }
183 
184 static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
185 				     enum forcewake_domains fw_domains)
186 {
187 	fw_domains_put(dev_priv, fw_domains);
188 	gen6_gt_check_fifodbg(dev_priv);
189 }
190 
191 static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
192 {
193 	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
194 
195 	return count & GT_FIFO_FREE_ENTRIES_MASK;
196 }
197 
198 static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
199 {
200 	int ret = 0;
201 
202 	/* On VLV, the FIFO will be shared by both SW and HW.
203 	 * So, we need to read the FREE_ENTRIES every time */
204 	if (IS_VALLEYVIEW(dev_priv))
205 		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
206 
207 	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
208 		int loop = 500;
209 		u32 fifo = fifo_free_entries(dev_priv);
210 
211 		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
212 			udelay(10);
213 			fifo = fifo_free_entries(dev_priv);
214 		}
215 		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
216 			++ret;
217 		dev_priv->uncore.fifo_count = fifo;
218 	}
219 	dev_priv->uncore.fifo_count--;
220 
221 	return ret;
222 }
223 
224 static enum hrtimer_restart
225 intel_uncore_fw_release_timer(struct hrtimer *timer)
226 {
227 	struct intel_uncore_forcewake_domain *domain =
228 	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
229 	struct drm_i915_private *dev_priv =
230 		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
231 	unsigned long irqflags;
232 
233 	assert_rpm_device_not_suspended(dev_priv);
234 
235 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
236 	if (WARN_ON(domain->wake_count == 0))
237 		domain->wake_count++;
238 
239 	if (--domain->wake_count == 0)
240 		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
241 
242 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
243 
244 	return HRTIMER_NORESTART;
245 }
246 
247 static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
248 					 bool restore)
249 {
250 	unsigned long irqflags;
251 	struct intel_uncore_forcewake_domain *domain;
252 	int retry_count = 100;
253 	enum forcewake_domains fw, active_domains;
254 
255 	/* Hold uncore.lock across reset to prevent any register access
256 	 * with forcewake not set correctly. Wait until all pending
257 	 * timers are run before holding.
258 	 */
259 	while (1) {
260 		unsigned int tmp;
261 
262 		active_domains = 0;
263 
264 		for_each_fw_domain(domain, dev_priv, tmp) {
265 			if (hrtimer_cancel(&domain->timer) == 0)
266 				continue;
267 
268 			intel_uncore_fw_release_timer(&domain->timer);
269 		}
270 
271 		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
272 
273 		for_each_fw_domain(domain, dev_priv, tmp) {
274 			if (hrtimer_active(&domain->timer))
275 				active_domains |= domain->mask;
276 		}
277 
278 		if (active_domains == 0)
279 			break;
280 
281 		if (--retry_count == 0) {
282 			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
283 			break;
284 		}
285 
286 		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
287 		cond_resched();
288 	}
289 
290 	WARN_ON(active_domains);
291 
292 	fw = dev_priv->uncore.fw_domains_active;
293 	if (fw)
294 		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
295 
296 	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
297 
298 	if (restore) { /* If reset with a user forcewake, try to restore */
299 		if (fw)
300 			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
301 
302 		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
303 			dev_priv->uncore.fifo_count =
304 				fifo_free_entries(dev_priv);
305 	}
306 
307 	if (!restore)
308 		assert_forcewakes_inactive(dev_priv);
309 
310 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
311 }
312 
313 static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
314 {
315 	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
316 	const unsigned int sets[4] = { 1, 1, 2, 2 };
317 	const u32 cap = dev_priv->edram_cap;
318 
319 	return EDRAM_NUM_BANKS(cap) *
320 		ways[EDRAM_WAYS_IDX(cap)] *
321 		sets[EDRAM_SETS_IDX(cap)] *
322 		1024 * 1024;
323 }
324 
325 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
326 {
327 	if (!HAS_EDRAM(dev_priv))
328 		return 0;
329 
330 	/* The capability bits needed for the size calculation
331 	 * are not there on pre-gen9 hardware, so always return 128MB.
332 	 */
333 	if (INTEL_GEN(dev_priv) < 9)
334 		return 128 * 1024 * 1024;
335 
336 	return gen9_edram_size(dev_priv);
337 }
338 
339 static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
340 {
341 	if (IS_HASWELL(dev_priv) ||
342 	    IS_BROADWELL(dev_priv) ||
343 	    INTEL_GEN(dev_priv) >= 9) {
344 		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
345 							HSW_EDRAM_CAP);
346 
347 		/* NB: We can't write IDICR yet because we do not have gt funcs
348 		 * set up */
349 	} else {
350 		dev_priv->edram_cap = 0;
351 	}
352 
353 	if (HAS_EDRAM(dev_priv))
354 		DRM_INFO("Found %lluMB of eDRAM\n",
355 			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
356 }
357 
358 static bool
359 fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
360 {
361 	u32 dbg;
362 
363 	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
364 	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
365 		return false;
366 
367 	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
368 
369 	return true;
370 }
371 
372 static bool
373 vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
374 {
375 	u32 cer;
376 
377 	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
378 	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
379 		return false;
380 
381 	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
382 
383 	return true;
384 }
385 
386 static bool
387 check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
388 {
389 	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
390 		return fpga_check_for_unclaimed_mmio(dev_priv);
391 
392 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
393 		return vlv_check_for_unclaimed_mmio(dev_priv);
394 
395 	return false;
396 }
397 
398 static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
399 					  bool restore_forcewake)
400 {
401 	struct intel_device_info *info = mkwrite_device_info(dev_priv);
402 
403 	/* clear out unclaimed reg detection bit */
404 	if (check_for_unclaimed_mmio(dev_priv))
405 		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
406 
407 	/* clear out old GT FIFO errors */
408 	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
409 		__raw_i915_write32(dev_priv, GTFIFODBG,
410 				   __raw_i915_read32(dev_priv, GTFIFODBG));
411 
412 	/* WaDisableShadowRegForCpd:chv */
413 	if (IS_CHERRYVIEW(dev_priv)) {
414 		__raw_i915_write32(dev_priv, GTFIFOCTL,
415 				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
416 				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
417 				   GT_FIFO_CTL_RC6_POLICY_STALL);
418 	}
419 
420 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
421 		info->has_decoupled_mmio = false;
422 
423 	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
424 }
425 
426 void intel_uncore_suspend(struct drm_i915_private *dev_priv)
427 {
428 	iosf_mbi_unregister_pmic_bus_access_notifier(
429 		&dev_priv->uncore.pmic_bus_access_nb);
430 	intel_uncore_forcewake_reset(dev_priv, false);
431 }
432 
433 void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
434 {
435 	__intel_uncore_early_sanitize(dev_priv, true);
436 	iosf_mbi_register_pmic_bus_access_notifier(
437 		&dev_priv->uncore.pmic_bus_access_nb);
438 	i915_check_and_clear_faults(dev_priv);
439 }
440 
441 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
442 {
443 	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
444 
445 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
446 	intel_sanitize_gt_powersave(dev_priv);
447 }
448 
449 static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
450 					 enum forcewake_domains fw_domains)
451 {
452 	struct intel_uncore_forcewake_domain *domain;
453 	unsigned int tmp;
454 
455 	fw_domains &= dev_priv->uncore.fw_domains;
456 
457 	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
458 		if (domain->wake_count++)
459 			fw_domains &= ~domain->mask;
460 
461 	if (fw_domains)
462 		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
463 }
464 
465 /**
466  * intel_uncore_forcewake_get - grab forcewake domain references
467  * @dev_priv: i915 device instance
468  * @fw_domains: forcewake domains to get reference on
469  *
470  * This function can be used to get the GT's forcewake domain references.
471  * Normal register access will handle the forcewake domains automatically.
472  * However, if some sequence requires the GT to not power down particular
473  * forcewake domains, this function should be called at the beginning of the
474  * sequence, and the references should subsequently be dropped by a symmetric
475  * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
476  * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
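 *
 * Illustrative usage (a sketch only, not lifted from a specific caller)::
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... raw I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);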
477  */
478 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
479 				enum forcewake_domains fw_domains)
480 {
481 	unsigned long irqflags;
482 
483 	if (!dev_priv->uncore.funcs.force_wake_get)
484 		return;
485 
486 	assert_rpm_wakelock_held(dev_priv);
487 
488 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
489 	__intel_uncore_forcewake_get(dev_priv, fw_domains);
490 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
491 }
492 
493 /**
494  * intel_uncore_forcewake_get__locked - grab forcewake domain references
495  * @dev_priv: i915 device instance
496  * @fw_domains: forcewake domains to get reference on
497  *
498  * See intel_uncore_forcewake_get(). This variant places the onus
499  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
500  */
501 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
502 					enum forcewake_domains fw_domains)
503 {
504 	lockdep_assert_held(&dev_priv->uncore.lock);
505 
506 	if (!dev_priv->uncore.funcs.force_wake_get)
507 		return;
508 
509 	__intel_uncore_forcewake_get(dev_priv, fw_domains);
510 }
511 
512 static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
513 					 enum forcewake_domains fw_domains)
514 {
515 	struct intel_uncore_forcewake_domain *domain;
516 	unsigned int tmp;
517 
518 	fw_domains &= dev_priv->uncore.fw_domains;
519 
520 	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
521 		if (WARN_ON(domain->wake_count == 0))
522 			continue;
523 
524 		if (--domain->wake_count)
525 			continue;
526 
527 		fw_domain_arm_timer(domain);
528 	}
529 }
530 
531 /**
532  * intel_uncore_forcewake_put - release a forcewake domain reference
533  * @dev_priv: i915 device instance
534  * @fw_domains: forcewake domains to put references on
535  *
536  * This function drops the device-level forcewakes for specified
537  * domains obtained by intel_uncore_forcewake_get().
538  */
539 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
540 				enum forcewake_domains fw_domains)
541 {
542 	unsigned long irqflags;
543 
544 	if (!dev_priv->uncore.funcs.force_wake_put)
545 		return;
546 
547 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
548 	__intel_uncore_forcewake_put(dev_priv, fw_domains);
549 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
550 }
551 
552 /**
553  * intel_uncore_forcewake_put__locked - release forcewake domain references
554  * @dev_priv: i915 device instance
555  * @fw_domains: forcewake domains to put references on
556  *
557  * See intel_uncore_forcewake_put(). This variant places the onus
558  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
559  */
560 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
561 					enum forcewake_domains fw_domains)
562 {
563 	lockdep_assert_held(&dev_priv->uncore.lock);
564 
565 	if (!dev_priv->uncore.funcs.force_wake_put)
566 		return;
567 
568 	__intel_uncore_forcewake_put(dev_priv, fw_domains);
569 }
570 
571 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
572 {
573 	if (!dev_priv->uncore.funcs.force_wake_get)
574 		return;
575 
576 	WARN_ON(dev_priv->uncore.fw_domains_active);
577 }
578 
579 /* We give fast paths for the really cool registers; only offsets below 0x40000 can require forcewake */
580 #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
581 
582 #define __gen6_reg_read_fw_domains(offset) \
583 ({ \
584 	enum forcewake_domains __fwd; \
585 	if (NEEDS_FORCE_WAKE(offset)) \
586 		__fwd = FORCEWAKE_RENDER; \
587 	else \
588 		__fwd = 0; \
589 	__fwd; \
590 })
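/*
 * Gen6/gen7 (other than vlv, which uses the fwtable path) expose a single
 * forcewake domain, so any register that needs forcewake maps to
 * FORCEWAKE_RENDER in the macro above.
 */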
591 
592 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
593 {
594 	if (offset < entry->start)
595 		return -1;
596 	else if (offset > entry->end)
597 		return 1;
598 	else
599 		return 0;
600 }
601 
602 /* Copied and "macroized" from lib/bsearch.c */
603 #define BSEARCH(key, base, num, cmp) ({                                 \
604 	unsigned int start__ = 0, end__ = (num);                        \
605 	typeof(base) result__ = NULL;                                   \
606 	while (start__ < end__) {                                       \
607 		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
608 		int ret__ = (cmp)((key), (base) + mid__);               \
609 		if (ret__ < 0) {                                        \
610 			end__ = mid__;                                  \
611 		} else if (ret__ > 0) {                                 \
612 			start__ = mid__ + 1;                            \
613 		} else {                                                \
614 			result__ = (base) + mid__;                      \
615 			break;                                          \
616 		}                                                       \
617 	}                                                               \
618 	result__;                                                       \
619 })
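/*
 * As with bsearch(3), @base must already be sorted with respect to @cmp;
 * BSEARCH() evaluates to a pointer to the matching element, or NULL when
 * @key is not present.
 */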
620 
621 static enum forcewake_domains
622 find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
623 {
624 	const struct intel_forcewake_range *entry;
625 
626 	entry = BSEARCH(offset,
627 			dev_priv->uncore.fw_domains_table,
628 			dev_priv->uncore.fw_domains_table_entries,
629 			fw_range_cmp);
630 
631 	if (!entry)
632 		return 0;
633 
634 	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
635 	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
636 	     entry->domains & ~dev_priv->uncore.fw_domains, offset);
637 
638 	return entry->domains;
639 }
640 
641 #define GEN_FW_RANGE(s, e, d) \
642 	{ .start = (s), .end = (e), .domains = (d) }
643 
644 #define HAS_FWTABLE(dev_priv) \
645 	(IS_GEN9(dev_priv) || \
646 	 IS_CHERRYVIEW(dev_priv) || \
647 	 IS_VALLEYVIEW(dev_priv))
648 
649 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
650 static const struct intel_forcewake_range __vlv_fw_ranges[] = {
651 	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
652 	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
653 	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
654 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
655 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
656 	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
657 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
658 };
659 
660 #define __fwtable_reg_read_fw_domains(offset) \
661 ({ \
662 	enum forcewake_domains __fwd = 0; \
663 	if (NEEDS_FORCE_WAKE((offset))) \
664 		__fwd = find_fw_domain(dev_priv, offset); \
665 	__fwd; \
666 })
667 
668 /* *Must* be sorted by offset! See intel_shadow_table_check(). */
669 static const i915_reg_t gen8_shadowed_regs[] = {
670 	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
671 	GEN6_RPNSWREQ,			/* 0xA008 */
672 	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
673 	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
674 	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
675 	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
676 	/* TODO: Other registers are not yet used */
677 };
678 
679 static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
680 {
681 	u32 offset = i915_mmio_reg_offset(*reg);
682 
683 	if (key < offset)
684 		return -1;
685 	else if (key > offset)
686 		return 1;
687 	else
688 		return 0;
689 }
690 
691 static bool is_gen8_shadowed(u32 offset)
692 {
693 	const i915_reg_t *regs = gen8_shadowed_regs;
694 
695 	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
696 		       mmio_reg_cmp);
697 }
698 
699 #define __gen8_reg_write_fw_domains(offset) \
700 ({ \
701 	enum forcewake_domains __fwd; \
702 	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
703 		__fwd = FORCEWAKE_RENDER; \
704 	else \
705 		__fwd = 0; \
706 	__fwd; \
707 })
708 
709 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
710 static const struct intel_forcewake_range __chv_fw_ranges[] = {
711 	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
712 	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
713 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
714 	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
715 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
716 	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
717 	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
718 	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
719 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
720 	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
721 	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
722 	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
723 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
724 	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
725 	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
726 	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
727 };
728 
729 #define __fwtable_reg_write_fw_domains(offset) \
730 ({ \
731 	enum forcewake_domains __fwd = 0; \
732 	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
733 		__fwd = find_fw_domain(dev_priv, offset); \
734 	__fwd; \
735 })
736 
737 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
738 static const struct intel_forcewake_range __gen9_fw_ranges[] = {
739 	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
740 	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
741 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
742 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
743 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
744 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
745 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
746 	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
747 	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
748 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
749 	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
750 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
751 	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
752 	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
753 	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
754 	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
755 	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
756 	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
757 	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
758 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
759 	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
760 	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
761 	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
762 	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
763 	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
764 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
765 	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
766 	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
767 	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
768 	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
769 	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
770 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
771 };
772 
773 static void
774 ilk_dummy_write(struct drm_i915_private *dev_priv)
775 {
776 	/* WaIssueDummyWriteToWakeupFromRC6:ilk
777 	 * Issue a dummy write to wake up the chip from rc6 before touching it
778 	 * for real. MI_MODE is masked, hence harmless to write 0 into. */
779 	__raw_i915_write32(dev_priv, MI_MODE, 0);
780 }
781 
782 static void
783 __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
784 		      const i915_reg_t reg,
785 		      const bool read,
786 		      const bool before)
787 {
788 	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
789 		 "Unclaimed %s register 0x%x\n",
790 		 read ? "read from" : "write to",
791 		 i915_mmio_reg_offset(reg)))
792 		i915.mmio_debug--; /* Only report the first N failures */
793 }
794 
795 static inline void
796 unclaimed_reg_debug(struct drm_i915_private *dev_priv,
797 		    const i915_reg_t reg,
798 		    const bool read,
799 		    const bool before)
800 {
801 	if (likely(!i915.mmio_debug))
802 		return;
803 
804 	__unclaimed_reg_debug(dev_priv, reg, read, before);
805 }
806 
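/*
 * Indexed by (forcewake domain mask - 1): a single-domain mask selects the
 * corresponding decoupled power domain, while any multi-domain combination
 * falls back to GEN9_DECOUPLED_PD_ALL.
 */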
807 static const enum decoupled_power_domain fw2dpd_domain[] = {
808 	GEN9_DECOUPLED_PD_RENDER,
809 	GEN9_DECOUPLED_PD_BLITTER,
810 	GEN9_DECOUPLED_PD_ALL,
811 	GEN9_DECOUPLED_PD_MEDIA,
812 	GEN9_DECOUPLED_PD_ALL,
813 	GEN9_DECOUPLED_PD_ALL,
814 	GEN9_DECOUPLED_PD_ALL
815 };
816 
817 /*
818  * Decoupled MMIO access for only 1 DWORD
819  */
820 static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
821 					 u32 reg,
822 					 enum forcewake_domains fw_domain,
823 					 enum decoupled_ops operation)
824 {
825 	enum decoupled_power_domain dp_domain;
826 	u32 ctrl_reg_data = 0;
827 
828 	dp_domain = fw2dpd_domain[fw_domain - 1];
829 
830 	ctrl_reg_data |= reg;
831 	ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
832 	ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
833 	ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
834 	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);
835 
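	/* The GO bit should be cleared by the hardware once the access completes. */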
836 	if (wait_for_atomic((__raw_i915_read32(dev_priv,
837 			    GEN9_DECOUPLED_REG0_DW1) &
838 			    GEN9_DECOUPLED_DW1_GO) == 0,
839 			    FORCEWAKE_ACK_TIMEOUT_MS))
840 		DRM_ERROR("Decoupled MMIO wait timed out\n");
841 }
842 
843 static inline u32
844 __gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
845 			     u32 reg,
846 			     enum forcewake_domains fw_domain)
847 {
848 	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
849 				     GEN9_DECOUPLED_OP_READ);
850 
851 	return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
852 }
853 
854 static inline void
855 __gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
856 			    u32 reg, u32 data,
857 			    enum forcewake_domains fw_domain)
858 {
859 
860 	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);
861 
862 	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
863 				     GEN9_DECOUPLED_OP_WRITE);
864 }
865 
866 
867 #define GEN2_READ_HEADER(x) \
868 	u##x val = 0; \
869 	assert_rpm_wakelock_held(dev_priv);
870 
871 #define GEN2_READ_FOOTER \
872 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
873 	return val
874 
875 #define __gen2_read(x) \
876 static u##x \
877 gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
878 	GEN2_READ_HEADER(x); \
879 	val = __raw_i915_read##x(dev_priv, reg); \
880 	GEN2_READ_FOOTER; \
881 }
882 
883 #define __gen5_read(x) \
884 static u##x \
885 gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
886 	GEN2_READ_HEADER(x); \
887 	ilk_dummy_write(dev_priv); \
888 	val = __raw_i915_read##x(dev_priv, reg); \
889 	GEN2_READ_FOOTER; \
890 }
891 
892 __gen5_read(8)
893 __gen5_read(16)
894 __gen5_read(32)
895 __gen5_read(64)
896 __gen2_read(8)
897 __gen2_read(16)
898 __gen2_read(32)
899 __gen2_read(64)
900 
901 #undef __gen5_read
902 #undef __gen2_read
903 
904 #undef GEN2_READ_FOOTER
905 #undef GEN2_READ_HEADER
906 
907 #define GEN6_READ_HEADER(x) \
908 	u32 offset = i915_mmio_reg_offset(reg); \
909 	unsigned long irqflags; \
910 	u##x val = 0; \
911 	assert_rpm_wakelock_held(dev_priv); \
912 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
913 	unclaimed_reg_debug(dev_priv, reg, true, true)
914 
915 #define GEN6_READ_FOOTER \
916 	unclaimed_reg_debug(dev_priv, reg, true, false); \
917 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
918 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
919 	return val
920 
921 static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
922 					enum forcewake_domains fw_domains)
923 {
924 	struct intel_uncore_forcewake_domain *domain;
925 	unsigned int tmp;
926 
927 	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
928 
929 	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
930 		fw_domain_arm_timer(domain);
931 
932 	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
933 }
934 
935 static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
936 				     enum forcewake_domains fw_domains)
937 {
938 	if (WARN_ON(!fw_domains))
939 		return;
940 
941 	/* Turn on all requested but inactive supported forcewake domains. */
942 	fw_domains &= dev_priv->uncore.fw_domains;
943 	fw_domains &= ~dev_priv->uncore.fw_domains_active;
944 
945 	if (fw_domains)
946 		___force_wake_auto(dev_priv, fw_domains);
947 }
948 
949 #define __gen_read(func, x) \
950 static u##x \
951 func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
952 	enum forcewake_domains fw_engine; \
953 	GEN6_READ_HEADER(x); \
954 	fw_engine = __##func##_reg_read_fw_domains(offset); \
955 	if (fw_engine) \
956 		__force_wake_auto(dev_priv, fw_engine); \
957 	val = __raw_i915_read##x(dev_priv, reg); \
958 	GEN6_READ_FOOTER; \
959 }
960 #define __gen6_read(x) __gen_read(gen6, x)
961 #define __fwtable_read(x) __gen_read(fwtable, x)
962 
963 #define __gen9_decoupled_read(x) \
964 static u##x \
965 gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
966 		       i915_reg_t reg, bool trace) { \
967 	enum forcewake_domains fw_engine; \
968 	GEN6_READ_HEADER(x); \
969 	fw_engine = __fwtable_reg_read_fw_domains(offset); \
970 	if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
971 		unsigned i; \
972 		u32 *ptr_data = (u32 *) &val; \
973 		for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
974 			*ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
975 								 offset, \
976 								 fw_engine); \
977 	} else { \
978 		val = __raw_i915_read##x(dev_priv, reg); \
979 	} \
980 	GEN6_READ_FOOTER; \
981 }
982 
983 __gen9_decoupled_read(32)
984 __gen9_decoupled_read(64)
985 __fwtable_read(8)
986 __fwtable_read(16)
987 __fwtable_read(32)
988 __fwtable_read(64)
989 __gen6_read(8)
990 __gen6_read(16)
991 __gen6_read(32)
992 __gen6_read(64)
993 
994 #undef __fwtable_read
995 #undef __gen6_read
996 #undef GEN6_READ_FOOTER
997 #undef GEN6_READ_HEADER
998 
999 #define GEN2_WRITE_HEADER \
1000 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1001 	assert_rpm_wakelock_held(dev_priv); \
1002 
1003 #define GEN2_WRITE_FOOTER
1004 
1005 #define __gen2_write(x) \
1006 static void \
1007 gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1008 	GEN2_WRITE_HEADER; \
1009 	__raw_i915_write##x(dev_priv, reg, val); \
1010 	GEN2_WRITE_FOOTER; \
1011 }
1012 
1013 #define __gen5_write(x) \
1014 static void \
1015 gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1016 	GEN2_WRITE_HEADER; \
1017 	ilk_dummy_write(dev_priv); \
1018 	__raw_i915_write##x(dev_priv, reg, val); \
1019 	GEN2_WRITE_FOOTER; \
1020 }
1021 
1022 __gen5_write(8)
1023 __gen5_write(16)
1024 __gen5_write(32)
1025 __gen2_write(8)
1026 __gen2_write(16)
1027 __gen2_write(32)
1028 
1029 #undef __gen5_write
1030 #undef __gen2_write
1031 
1032 #undef GEN2_WRITE_FOOTER
1033 #undef GEN2_WRITE_HEADER
1034 
1035 #define GEN6_WRITE_HEADER \
1036 	u32 offset = i915_mmio_reg_offset(reg); \
1037 	unsigned long irqflags; \
1038 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1039 	assert_rpm_wakelock_held(dev_priv); \
1040 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
1041 	unclaimed_reg_debug(dev_priv, reg, false, true)
1042 
1043 #define GEN6_WRITE_FOOTER \
1044 	unclaimed_reg_debug(dev_priv, reg, false, false); \
1045 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1046 
1047 #define __gen6_write(x) \
1048 static void \
1049 gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1050 	u32 __fifo_ret = 0; \
1051 	GEN6_WRITE_HEADER; \
1052 	if (NEEDS_FORCE_WAKE(offset)) { \
1053 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1054 	} \
1055 	__raw_i915_write##x(dev_priv, reg, val); \
1056 	if (unlikely(__fifo_ret)) { \
1057 		gen6_gt_check_fifodbg(dev_priv); \
1058 	} \
1059 	GEN6_WRITE_FOOTER; \
1060 }
1061 
1062 #define __gen_write(func, x) \
1063 static void \
1064 func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
1065 	enum forcewake_domains fw_engine; \
1066 	GEN6_WRITE_HEADER; \
1067 	fw_engine = __##func##_reg_write_fw_domains(offset); \
1068 	if (fw_engine) \
1069 		__force_wake_auto(dev_priv, fw_engine); \
1070 	__raw_i915_write##x(dev_priv, reg, val); \
1071 	GEN6_WRITE_FOOTER; \
1072 }
1073 #define __gen8_write(x) __gen_write(gen8, x)
1074 #define __fwtable_write(x) __gen_write(fwtable, x)
1075 
1076 #define __gen9_decoupled_write(x) \
1077 static void \
1078 gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
1079 			i915_reg_t reg, u##x val, \
1080 		bool trace) { \
1081 	enum forcewake_domains fw_engine; \
1082 	GEN6_WRITE_HEADER; \
1083 	fw_engine = __fwtable_reg_write_fw_domains(offset); \
1084 	if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
1085 		__gen9_decoupled_mmio_write(dev_priv, \
1086 					    offset, \
1087 					    val, \
1088 					    fw_engine); \
1089 	else \
1090 		__raw_i915_write##x(dev_priv, reg, val); \
1091 	GEN6_WRITE_FOOTER; \
1092 }
1093 
1094 __gen9_decoupled_write(32)
1095 __fwtable_write(8)
1096 __fwtable_write(16)
1097 __fwtable_write(32)
1098 __gen8_write(8)
1099 __gen8_write(16)
1100 __gen8_write(32)
1101 __gen6_write(8)
1102 __gen6_write(16)
1103 __gen6_write(32)
1104 
1105 #undef __fwtable_write
1106 #undef __gen8_write
1107 #undef __gen6_write
1108 #undef GEN6_WRITE_FOOTER
1109 #undef GEN6_WRITE_HEADER
1110 
1111 #define ASSIGN_WRITE_MMIO_VFUNCS(x) \
1112 do { \
1113 	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
1114 	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
1115 	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
1116 } while (0)
1117 
1118 #define ASSIGN_READ_MMIO_VFUNCS(x) \
1119 do { \
1120 	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
1121 	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
1122 	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
1123 	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
1124 } while (0)
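/* NB: both ASSIGN_*_MMIO_VFUNCS macros assume a dev_priv local is in scope. */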
1125 
1126 
1127 static void fw_domain_init(struct drm_i915_private *dev_priv,
1128 			   enum forcewake_domain_id domain_id,
1129 			   i915_reg_t reg_set,
1130 			   i915_reg_t reg_ack)
1131 {
1132 	struct intel_uncore_forcewake_domain *d;
1133 
1134 	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1135 		return;
1136 
1137 	d = &dev_priv->uncore.fw_domain[domain_id];
1138 
1139 	WARN_ON(d->wake_count);
1140 
1141 	WARN_ON(!i915_mmio_reg_valid(reg_set));
1142 	WARN_ON(!i915_mmio_reg_valid(reg_ack));
1143 
1144 	d->wake_count = 0;
1145 	d->reg_set = reg_set;
1146 	d->reg_ack = reg_ack;
1147 
1148 	d->id = domain_id;
1149 
1150 	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1151 	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1152 	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1153 
1154 	d->mask = BIT(domain_id);
1155 
1156 	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1157 	d->timer.function = intel_uncore_fw_release_timer;
1158 
1159 	dev_priv->uncore.fw_domains |= BIT(domain_id);
1160 
1161 	fw_domain_reset(dev_priv, d);
1162 }
1163 
1164 static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1165 {
1166 	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
1167 		return;
1168 
1169 	if (IS_GEN6(dev_priv)) {
1170 		dev_priv->uncore.fw_reset = 0;
1171 		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
1172 		dev_priv->uncore.fw_clear = 0;
1173 	} else {
1174 		/* WaRsClearFWBitsAtReset:bdw,skl */
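		/*
		 * These are masked registers: the upper 16 bits select which
		 * of the lower bits actually get written, which is what the
		 * _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers encode.
		 */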
1175 		dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
1176 		dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1177 		dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1178 	}
1179 
1180 	if (IS_GEN9(dev_priv)) {
1181 		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1182 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1183 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1184 			       FORCEWAKE_RENDER_GEN9,
1185 			       FORCEWAKE_ACK_RENDER_GEN9);
1186 		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1187 			       FORCEWAKE_BLITTER_GEN9,
1188 			       FORCEWAKE_ACK_BLITTER_GEN9);
1189 		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1190 			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1191 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1192 		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1193 		if (!IS_CHERRYVIEW(dev_priv))
1194 			dev_priv->uncore.funcs.force_wake_put =
1195 				fw_domains_put_with_fifo;
1196 		else
1197 			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1198 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1199 			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1200 		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1201 			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1202 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1203 		dev_priv->uncore.funcs.force_wake_get =
1204 			fw_domains_get_with_thread_status;
1205 		if (IS_HASWELL(dev_priv))
1206 			dev_priv->uncore.funcs.force_wake_put =
1207 				fw_domains_put_with_fifo;
1208 		else
1209 			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1210 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1211 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1212 	} else if (IS_IVYBRIDGE(dev_priv)) {
1213 		u32 ecobus;
1214 
1215 		/* IVB configs may use multi-threaded forcewake */
1216 
1217 		/* A small trick here - if the BIOS hasn't configured
1218 		 * MT forcewake, and if the device is in RC6, then
1219 		 * force_wake_mt_get will not wake the device and the
1220 		 * ECOBUS read will return zero, which is (correctly)
1221 		 * interpreted by the test below as MT forcewake being
1222 		 * disabled.
1223 		 */
1224 		dev_priv->uncore.funcs.force_wake_get =
1225 			fw_domains_get_with_thread_status;
1226 		dev_priv->uncore.funcs.force_wake_put =
1227 			fw_domains_put_with_fifo;
1228 
1229 		/* We need to init first for ECOBUS access and then
1230 		 * determine later if we want to reinit, in case MT access is
1231 		 * not working. At this stage we don't know which flavour this
1232 		 * IVB is, so it is better to also reset the gen6 fw registers
1233 		 * before the ECOBUS check.
1234 		 */
1235 
1236 		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
1237 		__raw_posting_read(dev_priv, ECOBUS);
1238 
1239 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1240 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1241 
1242 		spin_lock_irq(&dev_priv->uncore.lock);
1243 		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
1244 		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1245 		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_RENDER);
1246 		spin_unlock_irq(&dev_priv->uncore.lock);
1247 
1248 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1249 			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1250 			DRM_INFO("when using vblank-synced partial screen updates.\n");
1251 			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1252 				       FORCEWAKE, FORCEWAKE_ACK);
1253 		}
1254 	} else if (IS_GEN6(dev_priv)) {
1255 		dev_priv->uncore.funcs.force_wake_get =
1256 			fw_domains_get_with_thread_status;
1257 		dev_priv->uncore.funcs.force_wake_put =
1258 			fw_domains_put_with_fifo;
1259 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1260 			       FORCEWAKE, FORCEWAKE_ACK);
1261 	}
1262 
1263 	/* All future platforms are expected to require complex power gating */
1264 	WARN_ON(dev_priv->uncore.fw_domains == 0);
1265 }
1266 
1267 #define ASSIGN_FW_DOMAINS_TABLE(d) \
1268 { \
1269 	dev_priv->uncore.fw_domains_table = \
1270 			(struct intel_forcewake_range *)(d); \
1271 	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
1272 }
1273 
1274 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
1275 					 unsigned long action, void *data)
1276 {
1277 	struct drm_i915_private *dev_priv = container_of(nb,
1278 			struct drm_i915_private, uncore.pmic_bus_access_nb);
1279 
1280 	switch (action) {
1281 	case MBI_PMIC_BUS_ACCESS_BEGIN:
1282 		/*
1283 		 * Forcewake all now to make sure that we don't need to take
1284 		 * forcewake later, which, on systems where this notifier gets
1285 		 * called, requires the punit to access the shared pmic i2c
1286 		 * bus; that bus will be busy after this notification, leading to:
1287 		 * "render: timed out waiting for forcewake ack request."
1288 		 * errors.
1289 		 */
1290 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1291 		break;
1292 	case MBI_PMIC_BUS_ACCESS_END:
1293 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1294 		break;
1295 	}
1296 
1297 	return NOTIFY_OK;
1298 }
1299 
1300 void intel_uncore_init(struct drm_i915_private *dev_priv)
1301 {
1302 	i915_check_vgpu(dev_priv);
1303 
1304 	intel_uncore_edram_detect(dev_priv);
1305 	intel_uncore_fw_domains_init(dev_priv);
1306 	__intel_uncore_early_sanitize(dev_priv, false);
1307 
1308 	dev_priv->uncore.unclaimed_mmio_check = 1;
1309 	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
1310 		i915_pmic_bus_access_notifier;
1311 
1312 	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
1313 		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
1314 		ASSIGN_READ_MMIO_VFUNCS(gen2);
1315 	} else if (IS_GEN5(dev_priv)) {
1316 		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
1317 		ASSIGN_READ_MMIO_VFUNCS(gen5);
1318 	} else if (IS_GEN(dev_priv, 6, 7)) {
1319 		ASSIGN_WRITE_MMIO_VFUNCS(gen6);
1320 
1321 		if (IS_VALLEYVIEW(dev_priv)) {
1322 			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
1323 			ASSIGN_READ_MMIO_VFUNCS(fwtable);
1324 		} else {
1325 			ASSIGN_READ_MMIO_VFUNCS(gen6);
1326 		}
1327 	} else if (IS_GEN8(dev_priv)) {
1328 		if (IS_CHERRYVIEW(dev_priv)) {
1329 			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
1330 			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
1331 			ASSIGN_READ_MMIO_VFUNCS(fwtable);
1332 
1333 		} else {
1334 			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
1335 			ASSIGN_READ_MMIO_VFUNCS(gen6);
1336 		}
1337 	} else {
1338 		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
1339 		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
1340 		ASSIGN_READ_MMIO_VFUNCS(fwtable);
1341 		if (HAS_DECOUPLED_MMIO(dev_priv)) {
1342 			dev_priv->uncore.funcs.mmio_readl =
1343 						gen9_decoupled_read32;
1344 			dev_priv->uncore.funcs.mmio_readq =
1345 						gen9_decoupled_read64;
1346 			dev_priv->uncore.funcs.mmio_writel =
1347 						gen9_decoupled_write32;
1348 		}
1349 	}
1350 
1351 	iosf_mbi_register_pmic_bus_access_notifier(
1352 		&dev_priv->uncore.pmic_bus_access_nb);
1353 
1354 	i915_check_and_clear_faults(dev_priv);
1355 }
1356 #undef ASSIGN_WRITE_MMIO_VFUNCS
1357 #undef ASSIGN_READ_MMIO_VFUNCS
1358 
1359 void intel_uncore_fini(struct drm_i915_private *dev_priv)
1360 {
1361 	iosf_mbi_unregister_pmic_bus_access_notifier(
1362 		&dev_priv->uncore.pmic_bus_access_nb);
1363 
1364 	/* Paranoia: make sure we have disabled everything before we exit. */
1365 	intel_uncore_sanitize(dev_priv);
1366 	intel_uncore_forcewake_reset(dev_priv, false);
1367 }
1368 
1369 #define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
1370 
1371 static const struct register_whitelist {
1372 	i915_reg_t offset_ldw, offset_udw;
1373 	uint32_t size;
1374 	/* supported gens: bit (gen - 1) set for each supported gen, see GEN_RANGE() */
1375 	uint32_t gen_bitmask;
1376 } whitelist[] = {
1377 	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1378 	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1379 	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
1380 };
1381 
1382 int i915_reg_read_ioctl(struct drm_device *dev,
1383 			void *data, struct drm_file *file)
1384 {
1385 	struct drm_i915_private *dev_priv = to_i915(dev);
1386 	struct drm_i915_reg_read *reg = data;
1387 	struct register_whitelist const *entry = whitelist;
1388 	unsigned size;
1389 	i915_reg_t offset_ldw, offset_udw;
1390 	int i, ret = 0;
1391 
1392 	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1393 		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
1394 		    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
1395 			break;
1396 	}
1397 
1398 	if (i == ARRAY_SIZE(whitelist))
1399 		return -EINVAL;
1400 
1401 	/* We use the low bits to encode extra flags as the register should
1402 	 * be naturally aligned (and those that are not so aligned merely
1403 	 * limit the available flags for that register).
1404 	 */
1405 	offset_ldw = entry->offset_ldw;
1406 	offset_udw = entry->offset_udw;
1407 	size = entry->size;
1408 	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
1409 
1410 	intel_runtime_pm_get(dev_priv);
1411 
1412 	switch (size) {
1413 	case 8 | 1:
1414 		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
1415 		break;
1416 	case 8:
1417 		reg->val = I915_READ64(offset_ldw);
1418 		break;
1419 	case 4:
1420 		reg->val = I915_READ(offset_ldw);
1421 		break;
1422 	case 2:
1423 		reg->val = I915_READ16(offset_ldw);
1424 		break;
1425 	case 1:
1426 		reg->val = I915_READ8(offset_ldw);
1427 		break;
1428 	default:
1429 		ret = -EINVAL;
1430 		goto out;
1431 	}
1432 
1433 out:
1434 	intel_runtime_pm_put(dev_priv);
1435 	return ret;
1436 }
1437 
1438 static int i915_reset_complete(struct pci_dev *pdev)
1439 {
1440 	u8 gdrst;
1441 	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1442 	return (gdrst & GRDOM_RESET_STATUS) == 0;
1443 }
1444 
1445 static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1446 {
1447 	struct pci_dev *pdev = dev_priv->drm.pdev;
1448 
1449 	/* assert reset for at least 20 usec */
1450 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1451 	udelay(20);
1452 	pci_write_config_byte(pdev, I915_GDRST, 0);
1453 
1454 	return wait_for(i915_reset_complete(pdev), 500);
1455 }
1456 
1457 static int g4x_reset_complete(struct pci_dev *pdev)
1458 {
1459 	u8 gdrst;
1460 	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1461 	return (gdrst & GRDOM_RESET_ENABLE) == 0;
1462 }
1463 
1464 static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1465 {
1466 	struct pci_dev *pdev = dev_priv->drm.pdev;
1467 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1468 	return wait_for(g4x_reset_complete(pdev), 500);
1469 }
1470 
1471 static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1472 {
1473 	struct pci_dev *pdev = dev_priv->drm.pdev;
1474 	int ret;
1475 
1476 	pci_write_config_byte(pdev, I915_GDRST,
1477 			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
1478 	ret =  wait_for(g4x_reset_complete(pdev), 500);
1479 	if (ret)
1480 		return ret;
1481 
1482 	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
1483 	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1484 	POSTING_READ(VDECCLK_GATE_D);
1485 
1486 	pci_write_config_byte(pdev, I915_GDRST,
1487 			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1488 	ret =  wait_for(g4x_reset_complete(pdev), 500);
1489 	if (ret)
1490 		return ret;
1491 
1492 	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
1493 	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1494 	POSTING_READ(VDECCLK_GATE_D);
1495 
1496 	pci_write_config_byte(pdev, I915_GDRST, 0);
1497 
1498 	return 0;
1499 }
1500 
1501 static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1502 			     unsigned engine_mask)
1503 {
1504 	int ret;
1505 
1506 	I915_WRITE(ILK_GDSR,
1507 		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1508 	ret = intel_wait_for_register(dev_priv,
1509 				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1510 				      500);
1511 	if (ret)
1512 		return ret;
1513 
1514 	I915_WRITE(ILK_GDSR,
1515 		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1516 	ret = intel_wait_for_register(dev_priv,
1517 				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1518 				      500);
1519 	if (ret)
1520 		return ret;
1521 
1522 	I915_WRITE(ILK_GDSR, 0);
1523 
1524 	return 0;
1525 }
1526 
1527 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1528 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1529 				u32 hw_domain_mask)
1530 {
1531 	/* GEN6_GDRST is not in the gt power well, no need to check
1532 	 * for fifo space for the write or forcewake the chip for
1533 	 * the read
1534 	 */
1535 	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
1536 
1537 	/* Spin waiting for the device to ack the reset requests */
1538 	return intel_wait_for_register_fw(dev_priv,
1539 					  GEN6_GDRST, hw_domain_mask, 0,
1540 					  500);
1541 }
1542 
1543 /**
1544  * gen6_reset_engines - reset individual engines
1545  * @dev_priv: i915 device
1546  * @engine_mask: mask of intel_engine_flag() engines or ALL_ENGINES for full reset
1547  *
1548  * This function will reset the individual engines that are set in engine_mask.
1549  * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
1550  *
1551  * Note: It is the responsibility of the caller to handle the difference between
1552  * requesting a full domain reset and a reset of all available individual engines.
1553  *
1554  * Returns 0 on success, nonzero on error.
1555  */
1556 static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1557 			      unsigned engine_mask)
1558 {
1559 	struct intel_engine_cs *engine;
1560 	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1561 		[RCS] = GEN6_GRDOM_RENDER,
1562 		[BCS] = GEN6_GRDOM_BLT,
1563 		[VCS] = GEN6_GRDOM_MEDIA,
1564 		[VCS2] = GEN8_GRDOM_MEDIA2,
1565 		[VECS] = GEN6_GRDOM_VECS,
1566 	};
1567 	u32 hw_mask;
1568 	int ret;
1569 
1570 	if (engine_mask == ALL_ENGINES) {
1571 		hw_mask = GEN6_GRDOM_FULL;
1572 	} else {
1573 		unsigned int tmp;
1574 
1575 		hw_mask = 0;
1576 		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1577 			hw_mask |= hw_engine_mask[engine->id];
1578 	}
1579 
1580 	ret = gen6_hw_domain_reset(dev_priv, hw_mask);
1581 
1582 	intel_uncore_forcewake_reset(dev_priv, true);
1583 
1584 	return ret;
1585 }
1586 
1587 /**
1588  * intel_wait_for_register_fw - wait until register matches expected state
1589  * @dev_priv: the i915 device
1590  * @reg: the register to read
1591  * @mask: mask to apply to register value
1592  * @value: expected value
1593  * @timeout_ms: timeout in milliseconds
1594  *
1595  * This routine waits until the target register @reg contains the expected
1596  * @value after applying the @mask, i.e. it waits until ::
1597  *
1598  *     (I915_READ_FW(reg) & mask) == value
1599  *
1600  * Otherwise, the wait will timeout after @timeout_ms milliseconds.
1601  *
1602  * Note that this routine assumes the caller holds forcewake asserted, so it
1603  * is not suitable for very long waits. See intel_wait_for_register() if you
1604  * wish to wait without holding forcewake for the duration (i.e. you expect
1605  * the wait to be slow).
1606  *
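 * An illustrative call (a sketch; register, mask and timeout mirror the use
 * in gen6_hw_domain_reset())::
 *
 *	err = intel_wait_for_register_fw(dev_priv, GEN6_GDRST,
 *					 GEN6_GRDOM_FULL, 0, 500);
 *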
1607  * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1608  */
1609 int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
1610 			       i915_reg_t reg,
1611 			       const u32 mask,
1612 			       const u32 value,
1613 			       const unsigned long timeout_ms)
1614 {
1615 #define done ((I915_READ_FW(reg) & mask) == value)
1616 	int ret = wait_for_us(done, 2);
1617 	if (ret)
1618 		ret = wait_for(done, timeout_ms);
1619 	return ret;
1620 #undef done
1621 }
1622 
1623 /**
1624  * intel_wait_for_register - wait until register matches expected state
1625  * @dev_priv: the i915 device
1626  * @reg: the register to read
1627  * @mask: mask to apply to register value
1628  * @value: expected value
1629  * @timeout_ms: timeout in milliseconds
1630  *
1631  * This routine waits until the target register @reg contains the expected
1632  * @value after applying the @mask, i.e. it waits until ::
1633  *
1634  *     (I915_READ(reg) & mask) == value
1635  *
1636  * Otherwise, the wait will timeout after @timeout_ms milliseconds.
1637  *
1638  * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1639  */
1640 int intel_wait_for_register(struct drm_i915_private *dev_priv,
1641 			    i915_reg_t reg,
1642 			    const u32 mask,
1643 			    const u32 value,
1644 			    const unsigned long timeout_ms)
1645 {
1646 
1647 	unsigned fw =
1648 		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
1649 	int ret;
1650 
1651 	intel_uncore_forcewake_get(dev_priv, fw);
1652 	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
1653 	intel_uncore_forcewake_put(dev_priv, fw);
1654 	if (ret)
1655 		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
1656 			       timeout_ms);
1657 
1658 	return ret;
1659 }
1660 
1661 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1662 {
1663 	struct drm_i915_private *dev_priv = engine->i915;
1664 	int ret;
1665 
1666 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1667 		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1668 
1669 	ret = intel_wait_for_register_fw(dev_priv,
1670 					 RING_RESET_CTL(engine->mmio_base),
1671 					 RESET_CTL_READY_TO_RESET,
1672 					 RESET_CTL_READY_TO_RESET,
1673 					 700);
1674 	if (ret)
1675 		DRM_ERROR("%s: reset request timeout\n", engine->name);
1676 
1677 	return ret;
1678 }
1679 
1680 static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
1681 {
1682 	struct drm_i915_private *dev_priv = engine->i915;
1683 
1684 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1685 		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1686 }
1687 
1688 static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1689 			      unsigned engine_mask)
1690 {
1691 	struct intel_engine_cs *engine;
1692 	unsigned int tmp;
1693 
1694 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1695 		if (gen8_request_engine_reset(engine))
1696 			goto not_ready;
1697 
1698 	return gen6_reset_engines(dev_priv, engine_mask);
1699 
1700 not_ready:
1701 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1702 		gen8_unrequest_engine_reset(engine);
1703 
1704 	return -EIO;
1705 }
1706 
1707 typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1708 
1709 static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1710 {
1711 	if (!i915.reset)
1712 		return NULL;
1713 
1714 	if (INTEL_INFO(dev_priv)->gen >= 8)
1715 		return gen8_reset_engines;
1716 	else if (INTEL_INFO(dev_priv)->gen >= 6)
1717 		return gen6_reset_engines;
1718 	else if (IS_GEN5(dev_priv))
1719 		return ironlake_do_reset;
1720 	else if (IS_G4X(dev_priv))
1721 		return g4x_do_reset;
1722 	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
1723 		return g33_do_reset;
1724 	else if (INTEL_INFO(dev_priv)->gen >= 3)
1725 		return i915_do_reset;
1726 	else
1727 		return NULL;
1728 }
1729 
1730 int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1731 {
1732 	reset_func reset;
1733 	int ret;
1734 
1735 	reset = intel_get_gpu_reset(dev_priv);
1736 	if (reset == NULL)
1737 		return -ENODEV;
1738 
1739 	/* If the power well sleeps during the reset, the reset
1740 	 * request may be dropped and never completes (causing -EIO).
1741 	 */
1742 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1743 	ret = reset(dev_priv, engine_mask);
1744 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1745 
1746 	return ret;
1747 }
1748 
1749 bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
1750 {
1751 	return intel_get_gpu_reset(dev_priv) != NULL;
1752 }
1753 
1754 int intel_guc_reset(struct drm_i915_private *dev_priv)
1755 {
1756 	int ret;
1757 	unsigned long irqflags;
1758 
1759 	if (!HAS_GUC(dev_priv))
1760 		return -EINVAL;
1761 
1762 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1763 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1764 
1765 	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
1766 
1767 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1768 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1769 
1770 	return ret;
1771 }
1772 
1773 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
1774 {
1775 	return check_for_unclaimed_mmio(dev_priv);
1776 }
1777 
1778 bool
1779 intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1780 {
1781 	if (unlikely(i915.mmio_debug ||
1782 		     dev_priv->uncore.unclaimed_mmio_check <= 0))
1783 		return false;
1784 
1785 	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
1786 		DRM_DEBUG("Unclaimed register detected, "
1787 			  "enabling oneshot unclaimed register reporting. "
1788 			  "Please use i915.mmio_debug=N for more information.\n");
1789 		i915.mmio_debug++;
1790 		dev_priv->uncore.unclaimed_mmio_check--;
1791 		return true;
1792 	}
1793 
1794 	return false;
1795 }
1796 
1797 static enum forcewake_domains
1798 intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1799 				i915_reg_t reg)
1800 {
1801 	u32 offset = i915_mmio_reg_offset(reg);
1802 	enum forcewake_domains fw_domains;
1803 
1804 	if (HAS_FWTABLE(dev_priv)) {
1805 		fw_domains = __fwtable_reg_read_fw_domains(offset);
1806 	} else if (INTEL_GEN(dev_priv) >= 6) {
1807 		fw_domains = __gen6_reg_read_fw_domains(offset);
1808 	} else {
1809 		WARN_ON(!IS_GEN(dev_priv, 2, 5));
1810 		fw_domains = 0;
1811 	}
1812 
1813 	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1814 
1815 	return fw_domains;
1816 }
1817 
1818 static enum forcewake_domains
1819 intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1820 				 i915_reg_t reg)
1821 {
1822 	u32 offset = i915_mmio_reg_offset(reg);
1823 	enum forcewake_domains fw_domains;
1824 
1825 	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
1826 		fw_domains = __fwtable_reg_write_fw_domains(offset);
1827 	} else if (IS_GEN8(dev_priv)) {
1828 		fw_domains = __gen8_reg_write_fw_domains(offset);
1829 	} else if (IS_GEN(dev_priv, 6, 7)) {
1830 		fw_domains = FORCEWAKE_RENDER;
1831 	} else {
1832 		WARN_ON(!IS_GEN(dev_priv, 2, 5));
1833 		fw_domains = 0;
1834 	}
1835 
1836 	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1837 
1838 	return fw_domains;
1839 }
1840 
1841 /**
1842  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
1843  * 				    a register
1844  * @dev_priv: pointer to struct drm_i915_private
1845  * @reg: register in question
1846  * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
1847  *
1848  * Returns a set of forcewake domains that must be taken (for example with
1849  * intel_uncore_forcewake_get()) for the specified register to be accessible in the
1850  * specified mode (read, write or read/write) with raw mmio accessors.
1851  *
1852  * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
1853  * callers to do FIFO management on their own or risk losing writes.
1854  */
1855 enum forcewake_domains
1856 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
1857 			       i915_reg_t reg, unsigned int op)
1858 {
1859 	enum forcewake_domains fw_domains = 0;
1860 
1861 	WARN_ON(!op);
1862 
1863 	if (intel_vgpu_active(dev_priv))
1864 		return 0;
1865 
1866 	if (op & FW_REG_READ)
1867 		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
1868 
1869 	if (op & FW_REG_WRITE)
1870 		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
1871 
1872 	return fw_domains;
1873 }
1874 
1875 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1876 #include "selftests/intel_uncore.c"
1877 #endif
1878