xref: /openbmc/linux/drivers/base/regmap/regmap-irq.c (revision abfbd895)
/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;
	struct irq_chip irq_chip;

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;
	struct irq_domain *domain;

	int irq;
	int wake_count;

	void *status_reg_buf;
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
	unsigned int *wake_buf;

	unsigned int irq_reg_stride;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally; a masked
		 * interrupt which hasn't been acked will be ignored in the
		 * interrupt handler and may cause an interrupt storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by writing 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}
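
/*
 * Illustrative sketch (not part of this file): enable/disable only update
 * the in-memory mask_buf here; the write to the hardware mask register is
 * deferred to regmap_irq_sync_unlock() when the bus lock is dropped.  For
 * a hypothetical chip with reg_stride == 1 and an IRQ described as
 *
 *	{ .reg_offset = 2, .mask = BIT(3) }
 *
 * disabling that IRQ sets bit 3 of mask_buf[2], and the sync then writes
 * mask_buf[2] to (mask_base + 2 * irq_reg_stride) for the default,
 * non-inverted register layout.
 */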

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_wake		= regmap_irq_set_wake,
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			pm_runtime_put(map->dev);
			return IRQ_NONE;
		}
	}

	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			/* drop the runtime PM reference, as the other
			 * error paths in this function do */
			if (chip->runtime_pm)
				pm_runtime_put(map->dev);
			return IRQ_NONE;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				return IRQ_NONE;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				return IRQ_NONE;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
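
/*
 * Worked example (illustrative, with hypothetical values): suppose
 * num_regs == 1, the status read returns 0x05 and mask_buf[0] == 0x04.
 * Masking the status gives
 *
 *	status_buf[0] = 0x05 & ~0x04 = 0x01;
 *
 * so only the regmap_irq with .reg_offset == 0 and .mask == BIT(0) is
 * dispatched via handle_nested_irq(); bit 2 is ignored because that
 * interrupt is masked, and 0x01 is what gets written back to ack_base.
 */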

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};

/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate at specific IRQ number if irq_base > 0.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc(map->format.val_bytes *
					    chip->num_regs, GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
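
/*
 * Usage sketch (illustrative only; the register layout and all the
 * "my_"/"MY_"-prefixed names below are hypothetical, not from this file):
 *
 *	static const struct regmap_irq my_irqs[] = {
 *		[MY_IRQ_ALARM] = { .reg_offset = 0, .mask = BIT(0) },
 *		[MY_IRQ_FAULT] = { .reg_offset = 0, .mask = BIT(1) },
 *	};
 *
 *	static const struct regmap_irq_chip my_irq_chip = {
 *		.name		= "my-chip",
 *		.status_base	= MY_REG_IRQ_STATUS,
 *		.mask_base	= MY_REG_IRQ_MASK,
 *		.ack_base	= MY_REG_IRQ_ACK,
 *		.num_regs	= 1,
 *		.irqs		= my_irqs,
 *		.num_irqs	= ARRAY_SIZE(my_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(map, i2c->irq,
 *				  IRQF_TRIGGER_LOW | IRQF_ONESHOT, 0,
 *				  &my_irq_chip, &irq_data);
 *
 * with the matching cleanup in the driver's remove path being
 * regmap_del_irq_chip(i2c->irq, irq_data).
 */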

/**
 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	if (!d)
		return;

	free_irq(irq, d);
	irq_domain_remove(d->domain);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
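
/*
 * Illustrative use (MY_IRQ_ALARM is hypothetical): a driver that registered
 * with a nonzero irq_base can compute Linux IRQ numbers directly:
 *
 *	int irq = regmap_irq_chip_get_base(data) + MY_IRQ_ALARM;
 *
 * Drivers using a linear domain (irq_base == 0) should use
 * regmap_irq_get_virq() below instead.
 */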

/**
 * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
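
/*
 * Illustrative use (MY_IRQ_ALARM, my_alarm_handler and my are hypothetical):
 *
 *	int virq = regmap_irq_get_virq(data, MY_IRQ_ALARM);
 *
 *	if (virq < 0)
 *		return virq;
 *	ret = request_threaded_irq(virq, NULL, my_alarm_handler,
 *				   IRQF_ONESHOT, "my-chip alarm", my);
 *
 * The returned virq is a Linux IRQ number created on demand in the
 * chip's irq_domain.
 */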

/**
 * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 *
 * @data: regmap_irq controller to operate on.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
626