xref: /openbmc/linux/drivers/gpu/drm/i915/gt/intel_gt.c (revision 9adc8050)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_uncore.h"

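/*
 * intel_gt_init_early - set up the software-only state of the GT.
 *
 * Links the GT back to its owning i915 device and its uncore, initialises
 * the active_rings/closed_vma list heads and the closed_lock spinlock, and
 * hands off to the hangcheck, reset and GT power-management early-init
 * helpers. No registers are touched here, so this is presumably safe to
 * call before MMIO is set up. An illustrative probe ordering (not checked
 * against the actual call sites) would be:
 *
 *	intel_gt_init_early(&i915->gt, i915);
 *	...				(MMIO and GGTT probing)
 *	intel_gt_init_hw(i915);
 */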
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	gt->i915 = i915;
	gt->uncore = &i915->uncore;

	INIT_LIST_HEAD(&gt->active_rings);
	INIT_LIST_HEAD(&gt->closed_vma);

	spin_lock_init(&gt->closed_lock);

	intel_gt_init_hangcheck(gt);
	intel_gt_init_reset(gt);
	intel_gt_pm_init_early(gt);
}

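/*
 * intel_gt_init_hw - hardware-time counterpart of intel_gt_init_early().
 *
 * At this revision the only step is pointing gt->ggtt at the device's
 * global GTT, presumably because the GGTT has not yet been probed when
 * the early init runs.
 */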
void intel_gt_init_hw(struct drm_i915_private *i915)
{
	i915->gt.ggtt = &i915->ggtt;
}

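/*
 * Thin wrappers around intel_uncore_rmw(): rmw_set() ORs bits in,
 * rmw_clear() masks bits out, and clear_register() passes an empty
 * clear/set pair so the value read is simply written back, which
 * appears to be how the sticky error registers below get cleared.
 */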
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

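/*
 * Clear the valid bit in an engine's fault register. Despite the gen8_
 * prefix, the only caller in this file is the gen6-7 branch of
 * intel_gt_clear_error_registers(), where each engine still has its own
 * RING_FAULT register; gen8+ clears the shared GEN8_RING_FAULT_REG
 * directly instead.
 */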
static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

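/*
 * intel_gt_clear_error_registers - reset the sticky GPU error state.
 *
 * Clears the page-table and instruction-pointer error registers, masks
 * (via EMR) any EIR bits that refuse to clear so they stop re-raising the
 * master error interrupt, and finally drops the RING_FAULT valid bits:
 * the single GEN8_RING_FAULT_REG on gen8+, or the per-engine registers
 * selected by @engine_mask on gen6/7.
 */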
void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (!IS_GEN(i915, 2))
		clear_register(uncore, PGTBL_ER);

	if (INTEL_GEN(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, i915, engine_mask, id)
			gen8_clear_engine_error_register(engine);
	}
}

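/*
 * Walk every engine and, if a valid fault is latched in its fault
 * register, dump the faulting address, address space, source ID and
 * fault type to the debug log. Purely informational; the registers are
 * cleared separately by intel_gt_clear_error_registers().
 */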
static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt->i915, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault & PAGE_MASK,
					 fault & RING_FAULT_GTTSEL_MASK ?
					 "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault),
					 RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

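/*
 * Gen8+ variant of the above: one fault register covers all engines, and
 * the faulting virtual address is reassembled from the FAULT_TLB_DATA
 * registers (bits 43:12 from DATA0, the high bits from DATA1).
 */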
static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 fault = intel_uncore_read(uncore, GEN8_RING_FAULT_REG);

	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA0);
		fault_data1 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA1);
		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		DRM_DEBUG_DRIVER("Unexpected fault\n"
				 "\tAddr: 0x%08x_%08x\n"
				 "\tAddress space: %s\n"
				 "\tEngine ID: %d\n"
				 "\tSource ID: %d\n"
				 "\tType: %d\n",
				 upper_32_bits(fault_addr),
				 lower_32_bits(fault_addr),
				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
				 GEN8_RING_FAULT_ENGINE_ID(fault),
				 RING_FAULT_SRCID(fault),
				 RING_FAULT_FAULT_TYPE(fault));
	}
}

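/*
 * intel_gt_check_and_clear_faults - report and then clear any fault state
 * left behind by the GPU. Platforms before gen6 have no fault registers
 * to inspect, so they return without touching anything.
 */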
void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (INTEL_GEN(i915) >= 8)
		gen8_check_faults(gt);
	else if (INTEL_GEN(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

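/*
 * intel_gt_flush_ggtt_writes - make sure CPU writes through the GGTT
 * aperture have landed before anything else depends on them. On parts
 * flagged has_coherent_ggtt the wmb() alone is treated as sufficient;
 * otherwise a chipset flush plus an uncached read of RING_HEAD is used
 * to bound the write latency, for the reasons spelled out in the comment
 * below.
 */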
void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_uncore *uncore = gt->uncore;

		spin_lock_irq(&uncore->lock);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irq(&uncore->lock);
	}
}

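/*
 * intel_gt_chipset_flush - order pending CPU writes ahead of GPU access.
 * On pre-gen6 parts this additionally asks the intel-gtt layer to flush
 * the chipset write buffers.
 */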
void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (INTEL_GEN(gt->i915) < 6)
		intel_gtt_chipset_flush();
}

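/*
 * intel_gt_init_scratch - allocate and pin the GT-wide scratch buffer.
 *
 * Tries stolen memory first, falls back to an internal object, and pins
 * the result high in the global GTT (PIN_GLOBAL | PIN_HIGH), giving the
 * rest of the driver a known-safe GGTT address to hand to the hardware.
 * Callers are expected to pair it with intel_gt_fini_scratch(); an
 * illustrative call (not taken from the actual call site) would be:
 *
 *	err = intel_gt_init_scratch(gt, PAGE_SIZE);
 *	if (err)
 *		return err;
 *
 * Returns 0 on success or a negative error code.
 */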
int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

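/*
 * intel_gt_fini_scratch - unpin and release the buffer set up by
 * intel_gt_init_scratch().
 */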
void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

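/*
 * intel_gt_cleanup_early - undo intel_gt_init_early(); at this revision
 * only the reset state is torn down explicitly.
 */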
void intel_gt_cleanup_early(struct intel_gt *gt)
{
	intel_gt_fini_reset(gt);
}