// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
 */

#include <linux/irq.h>
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"

#define GSC_BAR_LENGTH  0x00000FFC

static void gsc_irq_mask(struct irq_data *d)
{
	/* generic irq handling */
}

static void gsc_irq_unmask(struct irq_data *d)
{
	/* generic irq handling */
}

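/*
 * Minimal irq_chip for the GSC interrupt: mask and unmask are intentionally
 * no-ops, since delivery is controlled by the GT interrupt code that calls
 * intel_gsc_irq_handler().
 */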
static struct irq_chip gsc_irq_chip = {
	.name = "gsc_irq_chip",
	.irq_mask = gsc_irq_mask,
	.irq_unmask = gsc_irq_unmask,
};

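/* Bind a freshly allocated irq descriptor to the dummy chip and simple handler. */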
static int gsc_irq_init(int irq)
{
	irq_set_chip_and_handler_name(irq, &gsc_irq_chip,
				      handle_simple_irq, "gsc_irq_handler");

	return irq_set_chip_data(irq, NULL);
}

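/*
 * Allocate and pin a contiguous, CPU-cleared lmem object to back the GSC
 * extended operational memory for this interface.
 */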
static int
gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
{
	struct intel_gt *gt = gsc_to_gt(gsc);
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_lmem(gt->i915, size,
					  I915_BO_ALLOC_CONTIGUOUS |
					  I915_BO_ALLOC_CPU_CLEAR);
	if (IS_ERR(obj)) {
		drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n");
		return PTR_ERR(obj);
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n");
		goto out_put;
	}

	intf->gem_obj = obj;

	return 0;

out_put:
	i915_gem_object_put(obj);
	return err;
}

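/* Unpin and release the extended operational memory object, if one was allocated. */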
static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
{
	struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);

	if (!obj)
		return;

	if (i915_gem_object_has_pinned_pages(obj))
		i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);
}

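/*
 * Per-platform, per-interface description: auxiliary device name, HECI MMIO
 * offset and length within BAR0, whether to use polling instead of an
 * interrupt, the slow_firmware flag forwarded to the mei driver, and the
 * amount of lmem to reserve for extended operational memory.
 */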
struct gsc_def {
	const char *name;
	unsigned long bar;
	size_t bar_size;
	bool use_polling;
	bool slow_firmware;
	size_t lmem_size;
};

/* gsc resources and definitions (HECI1 and HECI2) */
static const struct gsc_def gsc_def_dg1[] = {
	{
		/* HECI1 not yet implemented. */
	},
	{
		.name = "mei-gscfi",
		.bar = DG1_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
	}
};

static const struct gsc_def gsc_def_xehpsdv[] = {
	{
		/* HECI1 not enabled on the device. */
	},
	{
		.name = "mei-gscfi",
		.bar = DG1_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
		.use_polling = true,
		.slow_firmware = true,
	}
};

static const struct gsc_def gsc_def_dg2[] = {
	{
		.name = "mei-gsc",
		.bar = DG2_GSC_HECI1_BASE,
		.bar_size = GSC_BAR_LENGTH,
		.lmem_size = SZ_4M,
	},
	{
		.name = "mei-gscfi",
		.bar = DG2_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
	}
};

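/* Release callback for the auxiliary device: frees the containing mei_aux_device. */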
static void gsc_release_dev(struct device *dev)
{
	struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
	struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);

	kfree(adev);
}

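/*
 * Tear down one GSC interface: unregister the HuC notifier (HECI1 only),
 * delete and uninit the auxiliary device, free the irq descriptor and
 * release the extended operational memory.
 */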
static void gsc_destroy_one(struct drm_i915_private *i915,
			    struct intel_gsc *gsc, unsigned int intf_id)
{
	struct intel_gsc_intf *intf = &gsc->intf[intf_id];

	if (intf->adev) {
		struct auxiliary_device *aux_dev = &intf->adev->aux_dev;

		if (intf_id == 0)
			intel_huc_unregister_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
							  aux_dev->dev.bus);

		auxiliary_device_delete(aux_dev);
		auxiliary_device_uninit(aux_dev);
		intf->adev = NULL;
	}

	if (intf->irq >= 0)
		irq_free_desc(intf->irq);
	intf->irq = -1;

	gsc_ext_om_destroy(intf);
}

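/*
 * Set up one GSC interface: look up the platform definition, allocate an irq
 * descriptor (unless polling is used), optionally allocate the extended
 * operational memory, then create and register the mei auxiliary device.
 */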
static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
			 unsigned int intf_id)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct mei_aux_device *adev;
	struct auxiliary_device *aux_dev;
	const struct gsc_def *def;
	struct intel_gsc_intf *intf = &gsc->intf[intf_id];
	int ret;

	intf->irq = -1;
	intf->id = intf_id;

	/*
	 * On multi-tile setups the GSC is functional on the first tile only
	 */
	if (gsc_to_gt(gsc)->info.id != 0) {
		drm_dbg(&i915->drm, "Not initializing gsc for remote tiles\n");
		return;
	}

	if (intf_id == 0 && !HAS_HECI_PXP(i915))
		return;

	if (IS_DG1(i915)) {
		def = &gsc_def_dg1[intf_id];
	} else if (IS_XEHPSDV(i915)) {
		def = &gsc_def_xehpsdv[intf_id];
	} else if (IS_DG2(i915)) {
		def = &gsc_def_dg2[intf_id];
	} else {
		drm_warn_once(&i915->drm, "Unknown platform\n");
		return;
	}

	if (!def->name) {
		drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1);
		return;
	}

	/* skip irq initialization */
	if (def->use_polling)
		goto add_device;

	intf->irq = irq_alloc_desc(0);
	if (intf->irq < 0) {
		drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
		goto fail;
	}

	ret = gsc_irq_init(intf->irq);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc irq init failed %d\n", ret);
		goto fail;
	}

add_device:
	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		goto fail;

	if (def->lmem_size) {
		drm_dbg(&i915->drm, "setting up GSC lmem\n");

		if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
			drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
			kfree(adev);
			goto fail;
		}

		adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
		adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
	}

	adev->irq = intf->irq;
	adev->bar.parent = &pdev->resource[0];
	adev->bar.start = def->bar + pdev->resource[0].start;
	adev->bar.end = adev->bar.start + def->bar_size - 1;
	adev->bar.flags = IORESOURCE_MEM;
	adev->bar.desc = IORES_DESC_NONE;
	adev->slow_firmware = def->slow_firmware;

	aux_dev = &adev->aux_dev;
	aux_dev->name = def->name;
	aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
		      PCI_DEVID(pdev->bus->number, pdev->devfn);
	aux_dev->dev.parent = &pdev->dev;
	aux_dev->dev.release = gsc_release_dev;

	ret = auxiliary_device_init(aux_dev);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc aux init failed %d\n", ret);
		kfree(adev);
		goto fail;
	}

	intf->adev = adev; /* needed by the notifier */

	if (intf_id == 0)
		intel_huc_register_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
						aux_dev->dev.bus);

	ret = auxiliary_device_add(aux_dev);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
		if (intf_id == 0)
			intel_huc_unregister_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
							  aux_dev->dev.bus);
		intf->adev = NULL;

		/* adev will be freed with the put_device() and .release sequence */
		auxiliary_device_uninit(aux_dev);
		goto fail;
	}

	return;
fail:
	gsc_destroy_one(i915, gsc, intf->id);
}

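/*
 * Called from the GT interrupt handler: forward the GSC interrupt for the
 * given interface to its Linux irq so the registered handler runs.
 */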
static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
{
	int ret;

	if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
		drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id);
		return;
	}

	if (!HAS_HECI_GSC(gt->i915)) {
		drm_warn_once(&gt->i915->drm, "GSC irq: not supported");
		return;
	}

	if (gt->gsc.intf[intf_id].irq < 0)
		return;

	ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
	if (ret)
		drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
}

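/* Dispatch the per-interface GSC bits of the GT interrupt identity value. */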
void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (iir & GSC_IRQ_INTF(0))
		gsc_irq_handler(gt, 0);
	if (iir & GSC_IRQ_INTF(1))
		gsc_irq_handler(gt, 1);
}

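/* Create the GSC auxiliary devices for all interfaces on platforms with a HECI GSC. */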
void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
{
	unsigned int i;

	if (!HAS_HECI_GSC(i915))
		return;

	for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
		gsc_init_one(i915, gsc, i);
}

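/* Tear down all GSC interfaces created by intel_gsc_init(). */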
void intel_gsc_fini(struct intel_gsc *gsc)
{
	struct intel_gt *gt = gsc_to_gt(gsc);
	unsigned int i;

	if (!HAS_HECI_GSC(gt->i915))
		return;

	for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
		gsc_destroy_one(gt->i915, gsc, i);
}