// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
 */

#include <linux/irq.h>
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"

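/*
 * Length of the HECI register range handed to the mei auxiliary devices;
 * the range starts at the per-platform HECI base offset inside the GPU's
 * first PCI memory resource (BAR0).
 */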
#define GSC_BAR_LENGTH  0x00000FFC

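/*
 * The GSC interrupt is generated and masked at the GT level and only
 * forwarded here (see intel_gsc_irq_handler()), so the irq_chip backing
 * the software irq descriptor has nothing to do in its mask/unmask hooks.
 */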
static void gsc_irq_mask(struct irq_data *d)
{
	/* generic irq handling */
}

static void gsc_irq_unmask(struct irq_data *d)
{
	/* generic irq handling */
}

static struct irq_chip gsc_irq_chip = {
	.name = "gsc_irq_chip",
	.irq_mask = gsc_irq_mask,
	.irq_unmask = gsc_irq_unmask,
};

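/* Attach the stub chip and a simple flow handler to the allocated irq descriptor. */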
static int gsc_irq_init(int irq)
{
	irq_set_chip_and_handler_name(irq, &gsc_irq_chip,
				      handle_simple_irq, "gsc_irq_handler");

	return irq_set_chip_data(irq, NULL);
}

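/*
 * Allocate a contiguous, CPU-cleared chunk of local memory that is handed
 * to the GSC firmware as extended operational memory.  The pages stay
 * pinned until gsc_ext_om_destroy() is called.
 */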
static int
gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
{
	struct intel_gt *gt = gsc_to_gt(gsc);
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_lmem(gt->i915, size,
					  I915_BO_ALLOC_CONTIGUOUS |
					  I915_BO_ALLOC_CPU_CLEAR);
	if (IS_ERR(obj)) {
		drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n");
		return PTR_ERR(obj);
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n");
		goto out_put;
	}

	intf->gem_obj = obj;

	return 0;

out_put:
	i915_gem_object_put(obj);
	return err;
}

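/* Unpin and release the extended operational memory, if it was allocated. */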
static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
{
	struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);

	if (!obj)
		return;

	if (i915_gem_object_has_pinned_pages(obj))
		i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);
}

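/*
 * Per-platform description of one HECI interface: the auxiliary device
 * name, the register base offset and length within BAR0, whether the
 * interface must be polled instead of using an interrupt, a slow-firmware
 * hint forwarded to the mei driver, and how much local memory to set aside
 * as extended operational memory.
 */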
struct gsc_def {
	const char *name;
	unsigned long bar;
	size_t bar_size;
	bool use_polling;
	bool slow_firmware;
	size_t lmem_size;
};

/* gsc resources and definitions (HECI1 and HECI2) */
static const struct gsc_def gsc_def_dg1[] = {
	{
		/* HECI1 not yet implemented. */
	},
	{
		.name = "mei-gscfi",
		.bar = DG1_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
	}
};

static const struct gsc_def gsc_def_xehpsdv[] = {
	{
		/* HECI1 not enabled on the device. */
	},
	{
		.name = "mei-gscfi",
		.bar = DG1_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
		.use_polling = true,
		.slow_firmware = true,
	}
};

static const struct gsc_def gsc_def_dg2[] = {
	{
		.name = "mei-gsc",
		.bar = DG2_GSC_HECI1_BASE,
		.bar_size = GSC_BAR_LENGTH,
		.lmem_size = SZ_4M,
	},
	{
		.name = "mei-gscfi",
		.bar = DG2_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
	}
};

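/* Release callback for the auxiliary device; frees the containing mei_aux_device. */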
static void gsc_release_dev(struct device *dev)
{
	struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
	struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);

	kfree(adev);
}

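/*
 * Tear down one interface: unregister the auxiliary device (the final
 * put frees it through gsc_release_dev()), release the software irq
 * descriptor and free the extended operational memory.
 */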
static void gsc_destroy_one(struct drm_i915_private *i915,
			    struct intel_gsc *gsc, unsigned int intf_id)
{
	struct intel_gsc_intf *intf = &gsc->intf[intf_id];

	if (intf->adev) {
		auxiliary_device_delete(&intf->adev->aux_dev);
		auxiliary_device_uninit(&intf->adev->aux_dev);
		intf->adev = NULL;
	}

	if (intf->irq >= 0)
		irq_free_desc(intf->irq);
	intf->irq = -1;

	gsc_ext_om_destroy(intf);
}

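/*
 * Set up one HECI interface: look up the per-platform definition, allocate
 * a software irq descriptor unless the interface is polled, optionally
 * allocate extended operational memory, then describe the interface's
 * register range within BAR0 in a mei_aux_device and register it on the
 * auxiliary bus for the mei driver to bind to.
 */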
static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
			 unsigned int intf_id)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct mei_aux_device *adev;
	struct auxiliary_device *aux_dev;
	const struct gsc_def *def;
	struct intel_gsc_intf *intf = &gsc->intf[intf_id];
	int ret;

	intf->irq = -1;
	intf->id = intf_id;

	if (intf_id == 0 && !HAS_HECI_PXP(i915))
		return;

	if (IS_DG1(i915)) {
		def = &gsc_def_dg1[intf_id];
	} else if (IS_XEHPSDV(i915)) {
		def = &gsc_def_xehpsdv[intf_id];
	} else if (IS_DG2(i915)) {
		def = &gsc_def_dg2[intf_id];
	} else {
		drm_warn_once(&i915->drm, "Unknown platform\n");
		return;
	}

	if (!def->name) {
		drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1);
		return;
	}

	/* skip irq initialization */
	if (def->use_polling)
		goto add_device;

	intf->irq = irq_alloc_desc(0);
	if (intf->irq < 0) {
		drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
		goto fail;
	}

	ret = gsc_irq_init(intf->irq);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc irq init failed %d\n", ret);
		goto fail;
	}

add_device:
	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		goto fail;

	if (def->lmem_size) {
		drm_dbg(&i915->drm, "setting up GSC lmem\n");

		if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
			drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
			kfree(adev);
			goto fail;
		}

		adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
		adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
	}

	adev->irq = intf->irq;
	adev->bar.parent = &pdev->resource[0];
	adev->bar.start = def->bar + pdev->resource[0].start;
	adev->bar.end = adev->bar.start + def->bar_size - 1;
	adev->bar.flags = IORESOURCE_MEM;
	adev->bar.desc = IORES_DESC_NONE;
	adev->slow_firmware = def->slow_firmware;

	aux_dev = &adev->aux_dev;
	aux_dev->name = def->name;
	aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
		      PCI_DEVID(pdev->bus->number, pdev->devfn);
	aux_dev->dev.parent = &pdev->dev;
	aux_dev->dev.release = gsc_release_dev;

	ret = auxiliary_device_init(aux_dev);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc aux init failed %d\n", ret);
		kfree(adev);
		goto fail;
	}

	ret = auxiliary_device_add(aux_dev);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
		/* adev will be freed with the put_device() and .release sequence */
		auxiliary_device_uninit(aux_dev);
		goto fail;
	}
	intf->adev = adev;

	return;
fail:
	gsc_destroy_one(i915, gsc, intf->id);
}

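/*
 * Called from the GT interrupt path: forward the hardware event to the
 * software irq descriptor that the mei driver has requested.
 */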
static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
{
	int ret;

	if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
		drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id);
		return;
	}

	if (!HAS_HECI_GSC(gt->i915)) {
		drm_warn_once(&gt->i915->drm, "GSC irq: not supported");
		return;
	}

	if (gt->gsc.intf[intf_id].irq < 0)
		return;

	ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
	if (ret)
		drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
}

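/* Demultiplex the per-interface GSC bits of the GT interrupt identity register. */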
void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (iir & GSC_IRQ_INTF(0))
		gsc_irq_handler(gt, 0);
	if (iir & GSC_IRQ_INTF(1))
		gsc_irq_handler(gt, 1);
}

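/* Create the auxiliary devices for all HECI interfaces of the GSC, if present. */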
void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
{
	unsigned int i;

	if (!HAS_HECI_GSC(i915))
		return;

	for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
		gsc_init_one(i915, gsc, i);
}

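/* Tear down every interface created by intel_gsc_init(). */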
void intel_gsc_fini(struct intel_gsc *gsc)
{
	struct intel_gt *gt = gsc_to_gt(gsc);
	unsigned int i;

	if (!HAS_HECI_GSC(gt->i915))
		return;

	for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
		gsc_destroy_one(gt->i915, gsc, i);
}