/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/intr.h>
#include <core/device.h>
#include <core/subdev.h>
#include <subdev/pci.h>
#include <subdev/top.h>

#include <subdev/mc.h>

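/* Translate an interrupt source into the leaf index and bitmask it occupies
 * within the given interrupt tree.  NVKM_INTR_SUBDEV entries are matched
 * against the tree's static data table, with NVKM_SUBDEV_TOP entries
 * resolved through the per-device vectors provided by the TOP subdev;
 * NVKM_INTR_VECTOR_* types map directly onto a bit within a 32-bit leaf.
 */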
static int
nvkm_intr_xlat(struct nvkm_subdev *subdev, struct nvkm_intr *intr,
	       enum nvkm_intr_type type, int *leaf, u32 *mask)
{
	struct nvkm_device *device = subdev->device;

	if (type < NVKM_INTR_VECTOR_0) {
		if (type == NVKM_INTR_SUBDEV) {
			const struct nvkm_intr_data *data = intr->data;
			struct nvkm_top_device *tdev;

			while (data && data->mask) {
				if (data->type == NVKM_SUBDEV_TOP) {
					list_for_each_entry(tdev, &device->top->device, head) {
						if (tdev->intr >= 0 &&
						    tdev->type == subdev->type &&
						    tdev->inst == subdev->inst) {
							if (data->mask & BIT(tdev->intr)) {
								*leaf = data->leaf;
								*mask = BIT(tdev->intr);
								return 0;
							}
						}
					}
				} else
				if (data->type == subdev->type && data->inst == subdev->inst) {
					*leaf = data->leaf;
					*mask = data->mask;
					return 0;
				}

				data++;
			}
		} else {
			return -ENOSYS;
		}
	} else {
		if (type < intr->leaves * sizeof(*intr->stat) * 8) {
			*leaf = type / 32;
			*mask = BIT(type % 32);
			return 0;
		}
	}

	return -EINVAL;
}

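/* Locate the interrupt tree (and the leaf/mask within it) that delivers the
 * given interrupt type to a subdev, by asking each registered tree in turn.
 */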
static struct nvkm_intr *
nvkm_intr_find(struct nvkm_subdev *subdev, enum nvkm_intr_type type, int *leaf, u32 *mask)
{
	struct nvkm_intr *intr;
	int ret;

	list_for_each_entry(intr, &subdev->device->intr.intr, head) {
		ret = nvkm_intr_xlat(subdev, intr, type, leaf, mask);
		if (ret == 0)
			return intr;
	}

	return NULL;
}

static void
nvkm_intr_allow_locked(struct nvkm_intr *intr, int leaf, u32 mask)
{
	intr->mask[leaf] |= mask;
	if (intr->func->allow) {
		if (intr->func->reset)
			intr->func->reset(intr, leaf, mask);
		intr->func->allow(intr, leaf, mask);
	}
}

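/* Unmask the interrupt source that delivers 'type' interrupts to 'subdev'. */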
void
nvkm_intr_allow(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
{
	struct nvkm_device *device = subdev->device;
	struct nvkm_intr *intr;
	unsigned long flags;
	int leaf;
	u32 mask;

	intr = nvkm_intr_find(subdev, type, &leaf, &mask);
	if (intr) {
		nvkm_debug(intr->subdev, "intr %d/%08x allowed by %s\n", leaf, mask, subdev->name);
		spin_lock_irqsave(&device->intr.lock, flags);
		nvkm_intr_allow_locked(intr, leaf, mask);
		spin_unlock_irqrestore(&device->intr.lock, flags);
	}
}

static void
nvkm_intr_block_locked(struct nvkm_intr *intr, int leaf, u32 mask)
{
	intr->mask[leaf] &= ~mask;
	if (intr->func->block)
		intr->func->block(intr, leaf, mask);
}

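/* Mask the interrupt source that delivers 'type' interrupts to 'subdev'. */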
void
nvkm_intr_block(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
{
	struct nvkm_device *device = subdev->device;
	struct nvkm_intr *intr;
	unsigned long flags;
	int leaf;
	u32 mask;

	intr = nvkm_intr_find(subdev, type, &leaf, &mask);
	if (intr) {
		nvkm_debug(intr->subdev, "intr %d/%08x blocked by %s\n", leaf, mask, subdev->name);
		spin_lock_irqsave(&device->intr.lock, flags);
		nvkm_intr_block_locked(intr, leaf, mask);
		spin_unlock_irqrestore(&device->intr.lock, flags);
	}
}

static void
nvkm_intr_rearm_locked(struct nvkm_device *device)
{
	struct nvkm_intr *intr;

	list_for_each_entry(intr, &device->intr.intr, head)
		intr->func->rearm(intr);
	nvkm_mc_intr_rearm(device);
}

static void
nvkm_intr_unarm_locked(struct nvkm_device *device)
{
	struct nvkm_intr *intr;

	list_for_each_entry(intr, &device->intr.intr, head)
		intr->func->unarm(intr);
	nvkm_mc_intr_unarm(device);
}

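/* Top-level IRQ handler, shared by all registered interrupt trees.  Sources
 * are disarmed while pending leaves are collected and handlers executed;
 * any leaf that raised an interrupt nobody handled is blocked afterwards to
 * protect against IRQ storms.
 */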
static irqreturn_t
nvkm_intr(int irq, void *arg)
{
	struct nvkm_device *device = arg;
	struct nvkm_intr *intr;
	struct nvkm_inth *inth;
	irqreturn_t ret = IRQ_NONE;
	bool pending = false, handled;
	int prio, leaf;

	/* Disable all top-level interrupt sources, and re-arm MSI interrupts. */
	spin_lock(&device->intr.lock);
	if (!device->intr.armed)
		goto done_unlock;

	nvkm_intr_unarm_locked(device);
	nvkm_pci_msi_rearm(device);

	/* Fetch pending interrupt masks. */
	list_for_each_entry(intr, &device->intr.intr, head) {
		if (intr->func->pending(intr))
			pending = true;
	}

	nvkm_mc_intr(device, &handled);
	if (handled)
		ret = IRQ_HANDLED;

	if (!pending)
		goto done;

	/* Check that GPU is still on the bus by reading NV_PMC_BOOT_0. */
	if (WARN_ON(nvkm_rd32(device, 0x000000) == 0xffffffff))
		goto done;

	/* Execute handlers. */
	for (prio = 0; prio < ARRAY_SIZE(device->intr.prio); prio++) {
		list_for_each_entry(inth, &device->intr.prio[prio], head) {
			struct nvkm_intr *intr = inth->intr;

			if (intr->stat[inth->leaf] & inth->mask) {
				if (atomic_read(&inth->allowed)) {
					if (intr->func->reset)
						intr->func->reset(intr, inth->leaf, inth->mask);
					if (inth->func(inth) == IRQ_HANDLED)
						ret = IRQ_HANDLED;
				}
			}
		}
	}

	/* Nothing handled?  Some debugging/protection from IRQ storms is in order... */
	if (ret == IRQ_NONE) {
		list_for_each_entry(intr, &device->intr.intr, head) {
			for (leaf = 0; leaf < intr->leaves; leaf++) {
				if (intr->stat[leaf]) {
					nvkm_warn(intr->subdev, "intr%d: %08x\n",
						  leaf, intr->stat[leaf]);
					nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]);
				}
			}
		}
	}

done:
	/* Re-enable all top-level interrupt sources. */
	nvkm_intr_rearm_locked(device);
done_unlock:
	spin_unlock(&device->intr.lock);
	return ret;
}

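/* Register an interrupt tree with the device.  'leaves' is the number of
 * 32-bit status/mask words the tree exposes; storage for them is allocated
 * here and released by nvkm_intr_dtor() once the tree is on the device list.
 */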
int
nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *data,
	      struct nvkm_subdev *subdev, int leaves, struct nvkm_intr *intr)
{
	struct nvkm_device *device = subdev->device;
	int i;

	intr->func = func;
	intr->data = data;
	intr->subdev = subdev;
	intr->leaves = leaves;
	intr->stat = kcalloc(leaves, sizeof(*intr->stat), GFP_KERNEL);
	intr->mask = kcalloc(leaves, sizeof(*intr->mask), GFP_KERNEL);
	if (!intr->stat || !intr->mask) {
		kfree(intr->mask);
		kfree(intr->stat);
		return -ENOMEM;
	}

	if (intr->subdev->debug >= NV_DBG_DEBUG) {
		for (i = 0; i < intr->leaves; i++)
			intr->mask[i] = ~0;
	}

	spin_lock_irq(&device->intr.lock);
	list_add_tail(&intr->head, &device->intr.intr);
	spin_unlock_irq(&device->intr.lock);
	return 0;
}

static irqreturn_t
nvkm_intr_subdev(struct nvkm_inth *inth)
{
	struct nvkm_subdev *subdev = container_of(inth, typeof(*subdev), inth);

	nvkm_subdev_intr(subdev);
	return IRQ_HANDLED;
}

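/* Route a legacy per-subdev interrupt to the subdev's ->intr() method via
 * its embedded nvkm_inth, using vblank priority for the display engine.
 */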
static void
nvkm_intr_subdev_add_dev(struct nvkm_intr *intr, enum nvkm_subdev_type type, int inst)
{
	struct nvkm_subdev *subdev;
	enum nvkm_intr_prio prio;
	int ret;

	subdev = nvkm_device_subdev(intr->subdev->device, type, inst);
	if (!subdev || !subdev->func->intr)
		return;

	if (type == NVKM_ENGINE_DISP)
		prio = NVKM_INTR_PRIO_VBLANK;
	else
		prio = NVKM_INTR_PRIO_NORMAL;

	ret = nvkm_inth_add(intr, NVKM_INTR_SUBDEV, prio, subdev, nvkm_intr_subdev, &subdev->inth);
	if (WARN_ON(ret))
		return;

	nvkm_inth_allow(&subdev->inth);
}

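/* Hook up handlers for every legacy entry in a tree's data table, expanding
 * NVKM_SUBDEV_TOP entries into the individual devices enumerated by TOP.
 */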
static void
nvkm_intr_subdev_add(struct nvkm_intr *intr)
{
	const struct nvkm_intr_data *data;
	struct nvkm_device *device = intr->subdev->device;
	struct nvkm_top_device *tdev;

	for (data = intr->data; data && data->mask; data++) {
		if (data->legacy) {
			if (data->type == NVKM_SUBDEV_TOP) {
				list_for_each_entry(tdev, &device->top->device, head) {
					if (tdev->intr < 0 || !(data->mask & BIT(tdev->intr)))
						continue;

					nvkm_intr_subdev_add_dev(intr, tdev->type, tdev->inst);
				}
			} else {
				nvkm_intr_subdev_add_dev(intr, data->type, data->inst);
			}
		}
	}
}

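/* (Re)enable interrupt delivery for the whole device: register legacy
 * subdev handlers on first use, reprogram each tree's allow masks, and
 * re-arm the top-level sources.
 */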
void
nvkm_intr_rearm(struct nvkm_device *device)
{
	struct nvkm_intr *intr;
	int i;

	if (unlikely(!device->intr.legacy_done)) {
		list_for_each_entry(intr, &device->intr.intr, head)
			nvkm_intr_subdev_add(intr);
		device->intr.legacy_done = true;
	}

	spin_lock_irq(&device->intr.lock);
	list_for_each_entry(intr, &device->intr.intr, head) {
		for (i = 0; intr->func->block && i < intr->leaves; i++) {
			intr->func->block(intr, i, ~0);
			intr->func->allow(intr, i, intr->mask[i]);
		}
	}

	nvkm_intr_rearm_locked(device);
	device->intr.armed = true;
	spin_unlock_irq(&device->intr.lock);
}

void
nvkm_intr_unarm(struct nvkm_device *device)
{
	spin_lock_irq(&device->intr.lock);
	nvkm_intr_unarm_locked(device);
	device->intr.armed = false;
	spin_unlock_irq(&device->intr.lock);
}

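/* Fetch the IRQ line from the device backend and install the shared
 * top-level handler.
 */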
int
nvkm_intr_install(struct nvkm_device *device)
{
	int ret;

	device->intr.irq = device->func->irq(device);
	if (device->intr.irq < 0)
		return device->intr.irq;

	ret = request_irq(device->intr.irq, nvkm_intr, IRQF_SHARED, "nvkm", device);
	if (ret)
		return ret;

	device->intr.alloc = true;
	return 0;
}

void
nvkm_intr_dtor(struct nvkm_device *device)
{
	struct nvkm_intr *intr, *intt;

	list_for_each_entry_safe(intr, intt, &device->intr.intr, head) {
		list_del(&intr->head);
		kfree(intr->mask);
		kfree(intr->stat);
	}

	if (device->intr.alloc)
		free_irq(device->intr.irq, device);
}

void
nvkm_intr_ctor(struct nvkm_device *device)
{
	int i;

	INIT_LIST_HEAD(&device->intr.intr);
	for (i = 0; i < ARRAY_SIZE(device->intr.prio); i++)
		INIT_LIST_HEAD(&device->intr.prio[i]);

	spin_lock_init(&device->intr.lock);
	device->intr.armed = false;
}

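/* Stop an individual handler from being called.  The underlying hardware
 * source is not masked here.
 */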
void
nvkm_inth_block(struct nvkm_inth *inth)
{
	if (unlikely(!inth->intr))
		return;

	atomic_set(&inth->allowed, 0);
}

void
nvkm_inth_allow(struct nvkm_inth *inth)
{
	struct nvkm_intr *intr = inth->intr;
	unsigned long flags;

	if (unlikely(!inth->intr))
		return;

	spin_lock_irqsave(&intr->subdev->device->intr.lock, flags);
	if (!atomic_xchg(&inth->allowed, 1)) {
		if ((intr->mask[inth->leaf] & inth->mask) != inth->mask)
			nvkm_intr_allow_locked(intr, inth->leaf, inth->mask);
	}
	spin_unlock_irqrestore(&intr->subdev->device->intr.lock, flags);
}

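/* Register a handler for an interrupt source at the given priority.  The
 * handler starts out blocked; call nvkm_inth_allow() to start receiving
 * interrupts.
 */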
int
nvkm_inth_add(struct nvkm_intr *intr, enum nvkm_intr_type type, enum nvkm_intr_prio prio,
	      struct nvkm_subdev *subdev, nvkm_inth_func func, struct nvkm_inth *inth)
{
	struct nvkm_device *device = subdev->device;
	int ret;

	if (WARN_ON(inth->mask))
		return -EBUSY;

	ret = nvkm_intr_xlat(subdev, intr, type, &inth->leaf, &inth->mask);
	if (ret)
		return ret;

	nvkm_debug(intr->subdev, "intr %d/%08x requested by %s\n",
		   inth->leaf, inth->mask, subdev->name);

	inth->intr = intr;
	inth->func = func;
	atomic_set(&inth->allowed, 0);
	list_add_tail(&inth->head, &device->intr.prio[prio]);
	return 0;
}