xref: /openbmc/linux/drivers/gpu/drm/i915/gt/intel_gt.c (revision 86db9f28)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_uncore.h"
#include "intel_pm.h"

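/*
 * intel_gt_init_early - first, software-only stage of GT initialisation.
 *
 * Links the GT back to its owning i915 device and uncore helper, sets up
 * the irq and closed-vma locks, and runs the early (no hardware access)
 * init for hangcheck, reset handling, requests, GT power management and
 * the microcontrollers (GuC/HuC) via intel_uc_init_early().
 */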
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	gt->i915 = i915;
	gt->uncore = &i915->uncore;

	spin_lock_init(&gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	intel_gt_init_hangcheck(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_pm_init_early(gt);
	intel_uc_init_early(&gt->uc);
}

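/*
 * intel_gt_init_hw_early - point the GT at the global GTT and force power
 * management into a known state (RC6 off) before hardware initialisation.
 */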
void intel_gt_init_hw_early(struct drm_i915_private *i915)
{
	i915->gt.ggtt = &i915->ggtt;

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_gt_pm_disable(&i915->gt);
}

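/*
 * Zero the control, head, tail and start registers of a ring buffer the
 * driver never uses, leaving it parked in an idle state.
 */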
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

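/*
 * Per-platform list of ring bases the hardware exposes but the driver never
 * uses; see the caller in intel_gt_init_hw() for why they must be idled.
 */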
static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (IS_GEN(i915, 2)) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (IS_GEN(i915, 3)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

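/*
 * intel_gt_init_hw - program the GT hardware into a usable state.
 *
 * Called whenever the GT is (re)initialised. Bails out early if the GPU is
 * terminally wedged, then, under forcewake, applies and verifies the GT
 * workarounds, sets up swizzling, quiesces the unused rings, enables PPGTT,
 * initialises the uC (GuC/HuC) hardware and the MOCS tables.
 */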
int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	BUG_ON(!i915->kernel_context);
	ret = intel_gt_terminally_wedged(gt);
	if (ret)
		return ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume, which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

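/*
 * Thin wrappers around intel_uncore_rmw(): rmw_set() ORs bits in,
 * rmw_clear() masks bits out, and clear_register() rewrites the current
 * value back, which is how the error registers below are cleared.
 */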
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

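/*
 * Clear RING_FAULT_VALID in an engine's fault register and post the write.
 * Only used for the per-engine fault register layout (gen6/gen7); newer
 * platforms have a single global fault register handled directly below.
 */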
static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

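/*
 * intel_gt_clear_error_registers - reset the sticky GPU error state.
 *
 * Clears the page-table (PGTBL_ER) and IPEIR error registers, masks via EMR
 * any EIR bits that refuse to clear, and then clears the ring fault
 * register(s) appropriate to the platform generation.
 */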
void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (!IS_GEN(i915, 2))
		clear_register(uncore, PGTBL_ER);

	if (INTEL_GEN(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * Some errors might have become stuck;
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(i915) >= 12) {
		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, i915, engine_mask, id)
			gen8_clear_engine_error_register(engine);
	}
}

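/*
 * gen6/gen7: each engine has its own fault register, so walk all engines
 * and log any fault that is still flagged as valid.
 */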
static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt->i915, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault & PAGE_MASK,
					 fault & RING_FAULT_GTTSEL_MASK ?
					 "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault),
					 RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

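/*
 * gen8+: a single fault register covers all engines (moved to a new offset
 * from gen12 onwards). If it reports a valid fault, reconstruct the
 * faulting address from the TLB data registers and log the details.
 */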
static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (INTEL_GEN(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		DRM_DEBUG_DRIVER("Unexpected fault\n"
				 "\tAddr: 0x%08x_%08x\n"
				 "\tAddress space: %s\n"
				 "\tEngine ID: %d\n"
				 "\tSource ID: %d\n"
				 "\tType: %d\n",
				 upper_32_bits(fault_addr),
				 lower_32_bits(fault_addr),
				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
				 GEN8_RING_FAULT_ENGINE_ID(fault),
				 RING_FAULT_SRCID(fault),
				 RING_FAULT_FAULT_TYPE(fault));
	}
}

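/*
 * Report any pending GPU faults (gen6+) and then clear all error registers
 * so stale state is not carried forward.
 */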
void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (INTEL_GEN(i915) >= 8)
		gen8_check_faults(gt);
	else if (INTEL_GEN(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_uncore *uncore = gt->uncore;
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

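/*
 * Order pending CPU writes and, on pre-gen6 platforms that go through the
 * intel-gtt layer, ask the chipset to flush them towards memory.
 */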
void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (INTEL_GEN(gt->i915) < 6)
		intel_gtt_chipset_flush();
}

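/*
 * Register the GT with external users; currently this is only the IPS
 * (Intelligent Power Sharing) interface used on Ironlake (gen5).
 */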
void intel_gt_driver_register(struct intel_gt *gt)
{
	if (IS_GEN(gt->i915, 5))
		intel_gpu_ips_init(gt->i915);
}

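/*
 * Allocate the GT-wide scratch buffer: prefer stolen memory, fall back to
 * an internal object, then pin it high in the GGTT and mark it
 * unshrinkable so the shrinker can never reclaim it.
 */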
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

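/*
 * intel_gt_init - later, object-allocating stage of GT initialisation.
 *
 * Gen2 gets a 256K scratch area, everything else a single 4K page; then
 * the remaining GT power management setup is completed.
 */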
int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
	if (err)
		return err;

	intel_gt_pm_init(gt);

	return 0;
}

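/*
 * Teardown is split across several entry points that mirror the init stages
 * above: intel_gt_driver_remove() and intel_gt_driver_unregister() back out
 * the runtime state, intel_gt_driver_release() drops the remaining
 * allocations, and intel_gt_driver_late_release() runs last of all to tear
 * down reset and uC support.
 */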
void intel_gt_driver_remove(struct intel_gt *gt)
{
	GEM_BUG_ON(gt->awake);
	intel_gt_pm_disable(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_gpu_ips_teardown();
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_gt_pm_disable(gt);
	intel_gt_pm_fini(gt);

	intel_gt_fini_scratch(gt);
}

void intel_gt_driver_late_release(struct intel_gt *gt)
{
	intel_uc_driver_late_release(&gt->uc);
	intel_gt_fini_reset(gt);
}