xref: /openbmc/linux/drivers/gpu/drm/i915/gt/intel_gt.c (revision b8d312aa)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_uncore.h"

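/*
 * One-time early initialisation of the GT: wire up the backpointers to the
 * owning i915 and its uncore, set up the irq and closed-vma bookkeeping,
 * and run the early init hooks for hangcheck, reset, power management and
 * the uC.
 */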
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	gt->i915 = i915;
	gt->uncore = &i915->uncore;

	spin_lock_init(&gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	intel_gt_init_hangcheck(gt);
	intel_gt_init_reset(gt);
	intel_gt_pm_init_early(gt);
	intel_uc_init_early(&gt->uc);
}

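/* Associate the GT with the device's global GTT. */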
void intel_gt_init_hw(struct drm_i915_private *i915)
{
	i915->gt.ggtt = &i915->ggtt;
}

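/*
 * Thin wrappers around intel_uncore_rmw() for the read-modify-write
 * patterns used in this file: set bits, clear bits, or zero a register
 * outright, e.g. (these mirror uses further down):
 *
 *	rmw_set(uncore, EMR, eir);
 *	rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
 *	clear_register(uncore, PGTBL_ER);
 */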
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

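/*
 * Despite the gen8_ prefix, this clears the per-engine fault registers
 * used on gen6/7; gen8+ route all engine faults through a single register
 * that is handled directly in intel_gt_clear_error_registers() below.
 */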
static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

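/*
 * Clear any latched error state (PGTBL_ER, IPEIR, EIR and the ring fault
 * registers) so that stale errors are not reported against future work.
 * Only the engines in @engine_mask are touched on platforms that still
 * have per-engine fault registers.
 */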
void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (!IS_GEN(i915, 2))
		clear_register(uncore, PGTBL_ER);

	if (INTEL_GEN(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * Some errors might have become stuck;
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(i915) >= 12) {
		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, i915, engine_mask, id)
			gen8_clear_engine_error_register(engine);
	}
}

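/*
 * Walk each engine's fault register (gen6/7) and report any fault that is
 * still flagged as valid.
 */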
static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt->i915, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault & PAGE_MASK,
					 fault & RING_FAULT_GTTSEL_MASK ?
					 "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault),
					 RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

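/*
 * On gen8+ all engines report through a single fault register (relocated
 * again on gen12); decode and report any valid fault found there.
 */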
static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (INTEL_GEN(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

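		/*
		 * Reassemble the faulting address from the two TLB data
		 * registers: DATA0 supplies the lower address bits (shifted
		 * up by the 4K page shift), DATA1 the uppermost bits.
		 */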
		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		DRM_DEBUG_DRIVER("Unexpected fault\n"
				 "\tAddr: 0x%08x_%08x\n"
				 "\tAddress space: %s\n"
				 "\tEngine ID: %d\n"
				 "\tSource ID: %d\n"
				 "\tType: %d\n",
				 upper_32_bits(fault_addr),
				 lower_32_bits(fault_addr),
				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
				 GEN8_RING_FAULT_ENGINE_ID(fault),
				 RING_FAULT_SRCID(fault),
				 RING_FAULT_FAULT_TYPE(fault));
	}
}

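/*
 * Report any pending GPU faults and then clear the error registers;
 * platforms before gen6 have no fault registers to check, so they are
 * left alone.
 */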
void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (INTEL_GEN(i915) >= 8)
		gen8_check_faults(gt);
	else if (INTEL_GEN(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent
	 * hw, and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we could not reproduce this behaviour before
	 * Cannonlake).
	 */

	wmb();

	if (INTEL_INFO(i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

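	/*
	 * This is the uncached mmio read mentioned in the comment above: a
	 * posting read of a render ring register, issued with the device
	 * awake and under the uncore lock.
	 */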
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_uncore *uncore = gt->uncore;

		spin_lock_irq(&uncore->lock);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irq(&uncore->lock);
	}
}

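/*
 * Order outstanding CPU writes ahead of further device activity; on
 * pre-gen6 platforms this also flushes the chipset write buffers via the
 * intel-gtt helper.
 */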
void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (INTEL_GEN(gt->i915) < 6)
		intel_gtt_chipset_flush();
}

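/*
 * Allocate the GT-wide scratch buffer: prefer stolen memory, fall back to
 * an internal object, then pin it high in the global GTT and keep it out
 * of the shrinker's reach. A caller would do something like the sketch
 * below (size and call site are illustrative, not taken from this file):
 *
 *	err = intel_gt_init_scratch(gt, PAGE_SIZE);
 *	if (err)
 *		return err;
 */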
int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

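/*
 * Late teardown during driver unload: release the uC state and the reset
 * machinery set up in intel_gt_init_early().
 */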
void intel_gt_driver_late_release(struct intel_gt *gt)
{
	intel_uc_driver_late_release(&gt->uc);
	intel_gt_fini_reset(gt);
}
269