xref: /openbmc/linux/drivers/base/regmap/regmap-irq.c (revision 176f011b)
/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;
	struct irq_chip irq_chip;

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;
	struct irq_domain *domain;

	int irq;
	int wake_count;

	void *status_reg_buf;
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
	unsigned int *wake_buf;
	unsigned int *type_buf;
	unsigned int *type_buf_def;

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;

	bool clear_status:1;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);
	else
		return regmap_update_bits(d->map, reg, mask, val);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	if (d->clear_status) {
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->chip->status_base +
				(i * map->reg_stride * d->irq_reg_stride);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask, write it back to the
	 * hardware.  We rely on the regmap core's register cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally; otherwise
		 * a masked interrupt that was never acked will be ignored
		 * by the IRQ handler and may cause an interrupt storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by writing 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count, propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int mask, type;

	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for rising and falling edge interrupts, but
	 * we want to make them into a single virtual interrupt with
	 * configurable edge.
	 *
	 * If the interrupt we're enabling defines the falling or rising
	 * masks then instead of using the regular mask bits for this
	 * interrupt, use the value previously written to the type buffer
	 * at the corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && type)
		mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
	else
		mask = irq_data->mask;

	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg;
	const struct regmap_irq_type *t = &irq_data->type;

	if ((t->types_supported & type) != type)
		return 0;

	reg = t->type_reg_offset / map->reg_stride;

	if (t->type_reg_mask)
		d->type_buf[reg] &= ~t->type_reg_mask;
	else
		d->type_buf[reg] &= ~(t->type_falling_val |
				      t->type_rising_val |
				      t->type_level_low_val |
				      t->type_level_high_val);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= t->type_falling_val;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= t->type_rising_val;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (t->type_falling_val |
					t->type_rising_val);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		d->type_buf[reg] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		d->type_buf[reg] |= t->type_level_low_val;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};
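
/*
 * Locking model: the genirq core brackets the irq_enable(),
 * irq_disable(), irq_set_type() and irq_set_wake() callbacks with
 * irq_bus_lock() and irq_bus_sync_unlock().  The callbacks above
 * therefore only update the cached mask/type/wake buffers under
 * d->lock, and all the slow bus I/O is deferred to
 * regmap_irq_sync_unlock().
 */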

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			pm_runtime_put(map->dev);
			goto exit;
		}
	}

	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			/* Drop the runtime PM reference taken above */
			if (chip->runtime_pm)
				pm_runtime_put(map->dev);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	int num_type_reg;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
	if (num_type_reg) {
		d->type_buf_def = kcalloc(num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		if (!chip->mask_base)
			continue;

		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert) {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		}
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (chip->num_type_reg && !chip->type_in_mask) {
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;

			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);

			ret = regmap_read(map, reg, &d->type_buf_def[i]);
			if (ret) {
				dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}

			if (d->chip->type_invert)
				d->type_buf_def[i] = ~d->type_buf_def[i];
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
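
/*
 * Usage sketch (illustrative only): a hypothetical PMIC driver with a
 * single 8-bit status/mask/ack register bank.  The FOO_* constants and
 * foo_* names are assumptions for the example, not part of this API.
 *
 *	static const struct regmap_irq foo_irqs[] = {
 *		REGMAP_IRQ_REG(FOO_IRQ_GPIO, 0, BIT(0)),
 *		REGMAP_IRQ_REG(FOO_IRQ_RTC, 0, BIT(1)),
 *	};
 *
 *	static const struct regmap_irq_chip foo_irq_chip = {
 *		.name = "foo",
 *		.status_base = FOO_REG_IRQ_STATUS,
 *		.mask_base = FOO_REG_IRQ_MASK,
 *		.ack_base = FOO_REG_IRQ_ACK,
 *		.num_regs = 1,
 *		.irqs = foo_irqs,
 *		.num_irqs = ARRAY_SIZE(foo_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(map, client->irq, IRQF_TRIGGER_LOW, 0,
 *				  &foo_irq_chip, &irq_data);
 *
 * Pair with regmap_del_irq_chip(client->irq, irq_data) on removal.
 */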

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose of all virtual IRQs from the IRQ domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Skip this hwirq if there are holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual IRQ for this hwirq on the chip and,
		 * if one is mapped, dispose of it.
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}
	return *r == data;
}

/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device to which the irq_chip belongs.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
				  chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
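
/*
 * Probe-time sketch (illustrative; assumes the foo_irq_chip from the
 * example above and that dev, map and irq are already available):
 *
 *	struct regmap_irq_chip_data *irq_data;
 *	int ret;
 *
 *	ret = devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT, 0,
 *				       &foo_irq_chip, &irq_data);
 *	if (ret) {
 *		dev_err(dev, "Failed to add IRQ chip: %d\n", ret);
 *		return ret;
 *	}
 *
 * No explicit cleanup is needed; the chip is torn down automatically
 * when the device is unbound.
 */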

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);

	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
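
/*
 * Example (illustrative; only meaningful when a fixed irq_base was
 * passed to regmap_add_irq_chip().  FOO_IRQ_RTC, foo and
 * foo_rtc_handler are assumptions for the sketch):
 *
 *	int irq = regmap_irq_chip_get_base(irq_data) + FOO_IRQ_RTC;
 *
 *	ret = request_threaded_irq(irq, NULL, foo_rtc_handler,
 *				   IRQF_ONESHOT, "foo-rtc", foo);
 */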

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
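
/*
 * Example (illustrative): a child driver requesting one of the chip's
 * interrupts by its index in the IRQ list.  FOO_IRQ_RTC, foo and
 * foo_rtc_handler are assumptions for the sketch.  Note that
 * irq_create_mapping() returns 0 on failure, so both error forms are
 * checked:
 *
 *	int virq = regmap_irq_get_virq(irq_data, FOO_IRQ_RTC);
 *
 *	if (virq <= 0)
 *		return virq ? virq : -ENXIO;
 *	ret = request_threaded_irq(virq, NULL, foo_rtc_handler,
 *				   IRQF_ONESHOT, "foo-rtc", foo);
 */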

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration, NULL chip data is
 * accepted and simply yields a NULL domain, so callers can use this
 * even when no IRQ chip has been allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
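
/*
 * Example (illustrative): subsystems that operate on irq_domains can
 * resolve mappings themselves; a NULL result simply means no chip was
 * set up:
 *
 *	struct irq_domain *domain = regmap_irq_get_domain(irq_data);
 *	unsigned int virq = domain ? irq_find_mapping(domain, hwirq) : 0;
 */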
914