xref: /openbmc/linux/kernel/irq/irq_sim.c (revision 21673fcb)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl>
 * Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com>
 */

#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/irq_work.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/*
 * Per-domain state: the irq_work used to run the handlers, the bitmap of
 * pending hardware interrupt numbers and the simulated irq_domain itself.
 */
struct irq_sim_work_ctx {
	struct irq_work		work;
	int			irq_base;
	unsigned int		irq_count;
	unsigned long		*pending;
	struct irq_domain	*domain;
};

/* Per-interrupt state: whether the line is currently unmasked. */
struct irq_sim_irq_ctx {
	int			irqnum;
	bool			enabled;
	struct irq_sim_work_ctx	*work_ctx;
};

/*
 * Masking a simulated interrupt only clears the per-interrupt "enabled"
 * flag: irq_sim_set_irqchip_state() will not queue events for a masked
 * line.
 */
static void irq_sim_irqmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

	irq_ctx->enabled = false;
}

static void irq_sim_irqunmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

	irq_ctx->enabled = true;
}

static int irq_sim_set_type(struct irq_data *data, unsigned int type)
{
	/* We only support rising and falling edge trigger types. */
	if (type & ~IRQ_TYPE_EDGE_BOTH)
		return -EINVAL;

	irqd_set_trigger_type(data, type);

	return 0;
}

static int irq_sim_get_irqchip_state(struct irq_data *data,
				     enum irqchip_irq_state which, bool *state)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		if (irq_ctx->enabled)
			*state = test_bit(hwirq, irq_ctx->work_ctx->pending);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int irq_sim_set_irqchip_state(struct irq_data *data,
				     enum irqchip_irq_state which, bool state)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		if (irq_ctx->enabled) {
			assign_bit(hwirq, irq_ctx->work_ctx->pending, state);
			if (state)
				irq_work_queue(&irq_ctx->work_ctx->work);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

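/*
 * Consumers inject events into the simulator through the generic
 * irq_set_irqchip_state() call. A minimal sketch (virq being a previously
 * mapped and requested interrupt):
 *
 *	irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true);
 *
 * The event is only queued if the line is currently unmasked, i.e. the
 * interrupt has been requested and not disabled.
 */
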
static struct irq_chip irq_sim_irqchip = {
	.name			= "irq_sim",
	.irq_mask		= irq_sim_irqmask,
	.irq_unmask		= irq_sim_irqunmask,
	.irq_set_type		= irq_sim_set_type,
	.irq_get_irqchip_state	= irq_sim_get_irqchip_state,
	.irq_set_irqchip_state	= irq_sim_set_irqchip_state,
};

/*
 * The irq_work callback: runs in hard interrupt context and dispatches
 * every simulated interrupt that is currently marked pending.
 */
static void irq_sim_handle_irq(struct irq_work *work)
{
	struct irq_sim_work_ctx *work_ctx;
	unsigned int offset = 0;
	int irqnum;

	work_ctx = container_of(work, struct irq_sim_work_ctx, work);

	while (!bitmap_empty(work_ctx->pending, work_ctx->irq_count)) {
		offset = find_next_bit(work_ctx->pending,
				       work_ctx->irq_count, offset);
		clear_bit(offset, work_ctx->pending);
		irqnum = irq_find_mapping(work_ctx->domain, offset);
		handle_simple_irq(irq_to_desc(irqnum));
	}
}

static int irq_sim_domain_map(struct irq_domain *domain,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct irq_sim_work_ctx *work_ctx = domain->host_data;
	struct irq_sim_irq_ctx *irq_ctx;

	irq_ctx = kzalloc(sizeof(*irq_ctx), GFP_KERNEL);
	if (!irq_ctx)
		return -ENOMEM;

	irq_set_chip(virq, &irq_sim_irqchip);
	irq_set_chip_data(virq, irq_ctx);
	irq_set_handler(virq, handle_simple_irq);
	/* Allow the interrupt to be requested and auto-enabled, but not probed. */
	irq_modify_status(virq, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	irq_ctx->work_ctx = work_ctx;

	return 0;
}

static void irq_sim_domain_unmap(struct irq_domain *domain, unsigned int virq)
{
	struct irq_sim_irq_ctx *irq_ctx;
	struct irq_data *irqd;

	irqd = irq_domain_get_irq_data(domain, virq);
	irq_ctx = irq_data_get_irq_chip_data(irqd);

	irq_set_handler(virq, NULL);
	irq_domain_reset_irq_data(irqd);
	kfree(irq_ctx);
}

static const struct irq_domain_ops irq_sim_domain_ops = {
	.map		= irq_sim_domain_map,
	.unmap		= irq_sim_domain_unmap,
};

/**
 * irq_domain_create_sim - Create a new interrupt simulator irq_domain and
 *                         allocate a range of dummy interrupts.
 *
 * @fwnode:     struct fwnode_handle to be associated with this domain.
 * @num_irqs:   Number of interrupts to allocate.
 *
 * On success: returns a new irq_domain object.
 * On failure: returns a negative errno wrapped with ERR_PTR().
 */
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
					 unsigned int num_irqs)
{
	struct irq_sim_work_ctx *work_ctx;

	work_ctx = kmalloc(sizeof(*work_ctx), GFP_KERNEL);
	if (!work_ctx)
		goto err_out;

	work_ctx->pending = bitmap_zalloc(num_irqs, GFP_KERNEL);
	if (!work_ctx->pending)
		goto err_free_work_ctx;

	work_ctx->domain = irq_domain_create_linear(fwnode, num_irqs,
						    &irq_sim_domain_ops,
						    work_ctx);
	if (!work_ctx->domain)
		goto err_free_bitmap;

	work_ctx->irq_count = num_irqs;
	work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq);

	return work_ctx->domain;

err_free_bitmap:
	bitmap_free(work_ctx->pending);
err_free_work_ctx:
	kfree(work_ctx);
err_out:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(irq_domain_create_sim);

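/*
 * Example usage (a minimal sketch, not part of this file; my_sim_handler
 * and the NULL fwnode are placeholders):
 *
 *	static irqreturn_t my_sim_handler(int irq, void *data)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	struct irq_domain *domain;
 *	unsigned int virq;
 *	int ret;
 *
 *	domain = irq_domain_create_sim(NULL, 4);
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *
 *	virq = irq_create_mapping(domain, 0);
 *	ret = request_irq(virq, my_sim_handler, 0, "my-sim", NULL);
 *
 *	// Mark hwirq 0 pending - queues the irq_work which will invoke
 *	// my_sim_handler().
 *	irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true);
 *
 *	free_irq(virq, NULL);
 *	irq_domain_remove_sim(domain);
 */
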
/**
 * irq_domain_remove_sim - Deinitialize the interrupt simulator domain: free
 *                         the interrupt descriptors and allocated memory.
 *
 * @domain:     The interrupt simulator domain to tear down.
 */
void irq_domain_remove_sim(struct irq_domain *domain)
{
	struct irq_sim_work_ctx *work_ctx = domain->host_data;

	irq_work_sync(&work_ctx->work);
	bitmap_free(work_ctx->pending);
	kfree(work_ctx);

	irq_domain_remove(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove_sim);

static void devm_irq_domain_remove_sim(void *data)
{
	struct irq_domain *domain = data;

	irq_domain_remove_sim(domain);
}

/**
 * devm_irq_domain_create_sim - Create a new interrupt simulator for
 *                              a managed device.
 *
 * @dev:        Device to initialize the simulator object for.
 * @fwnode:     struct fwnode_handle to be associated with this domain.
 * @num_irqs:   Number of interrupts to allocate.
 *
 * On success: returns a new irq_domain object.
 * On failure: returns a negative errno wrapped with ERR_PTR().
 */
struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
					      struct fwnode_handle *fwnode,
					      unsigned int num_irqs)
{
	struct irq_domain *domain;
	int ret;

	domain = irq_domain_create_sim(fwnode, num_irqs);
	if (IS_ERR(domain))
		return domain;

	ret = devm_add_action_or_reset(dev, devm_irq_domain_remove_sim, domain);
	if (ret)
		return ERR_PTR(ret);

	return domain;
}
EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim);
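
/*
 * Example for managed use (a minimal sketch; my_probe() and my_handler()
 * are placeholders):
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct irq_domain *domain;
 *		unsigned int virq;
 *
 *		domain = devm_irq_domain_create_sim(&pdev->dev, NULL, 2);
 *		if (IS_ERR(domain))
 *			return PTR_ERR(domain);
 *
 *		virq = irq_create_mapping(domain, 0);
 *		return devm_request_irq(&pdev->dev, virq, my_handler, 0,
 *					"my-sim", NULL);
 *	}
 *
 * The simulator domain is torn down automatically when the device is
 * unbound, together with the other managed resources.
 */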