// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Linaro Limited
 * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>

/*
 * This is the driver for the Qualcomm MPM (MSM Power Manager) interrupt
 * controller, which is commonly found on Qualcomm SoCs built on the RPM
 * architecture. Sitting in the always-on domain, the MPM monitors wakeup
 * interrupts while the SoC is asleep and wakes up the AP when one of those
 * interrupts occurs. This driver doesn't directly access the physical MPM
 * registers, though. Instead, the accesses are bridged via a piece of
 * internal memory (SRAM) that is accessible to both the AP and the RPM.
 * This piece of memory is called 'vMPM' in the driver.
 *
 * While the SoC is awake, vMPM is owned by the AP, and all of the register
 * setup done by this driver happens on vMPM. When the AP is about to be
 * power collapsed, the driver sends a mailbox notification to the RPM,
 * which takes over vMPM ownership and copies vMPM into the physical MPM
 * registers. On wakeup, the AP is woken up by an MPM pin/interrupt, and
 * the RPM copies the STATUS registers back into vMPM. The AP then owns
 * vMPM again.
 *
 * vMPM register map:
 *
 *    31                              0
 *    +--------------------------------+
 *    |            TIMER0              | 0x00
 *    +--------------------------------+
 *    |            TIMER1              | 0x04
 *    +--------------------------------+
 *    |            ENABLE0             | 0x08
 *    +--------------------------------+
 *    |              ...               | ...
 *    +--------------------------------+
 *    |            ENABLEn             |
 *    +--------------------------------+
 *    |          FALLING_EDGE0         |
 *    +--------------------------------+
 *    |              ...               |
 *    +--------------------------------+
 *    |            STATUSn             |
 *    +--------------------------------+
 *
 *    n = DIV_ROUND_UP(pin_cnt, 32)
 */
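
/*
 * As a worked example of the bank offset math used by qcom_mpm_read() and
 * qcom_mpm_write() below (assuming an illustrative pin_cnt of 96, i.e.
 * reg_stride = 3):
 *
 *    offset = (reg * reg_stride + index + 2) * 4
 *
 * where the "+ 2" skips the two TIMER words. ENABLE0 (reg = 0, index = 0)
 * thus lands at (0 * 3 + 0 + 2) * 4 = 0x08, and FALLING_EDGE0 (reg = 1,
 * index = 0) at (1 * 3 + 0 + 2) * 4 = 0x14, matching the map above.
 */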

#define MPM_REG_ENABLE		0
#define MPM_REG_FALLING_EDGE	1
#define MPM_REG_RISING_EDGE	2
#define MPM_REG_POLARITY	3
#define MPM_REG_STATUS		4

/* MPM pin map to GIC hwirq */
struct mpm_gic_map {
	int pin;
	irq_hw_number_t hwirq;
};

struct qcom_mpm_priv {
	void __iomem *base;
	raw_spinlock_t lock;
	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;
	struct mpm_gic_map *maps;
	unsigned int map_cnt;
	unsigned int reg_stride;
	struct irq_domain *domain;
	struct generic_pm_domain genpd;
};

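/* Read one 32-bit word of a vMPM register bank (see the map above) */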
static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg,
			 unsigned int index)
{
	unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;

	return readl_relaxed(priv->base + offset);
}

static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg,
			   unsigned int index, u32 val)
{
	unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;

	writel_relaxed(val, priv->base + offset);

	/* Ensure the write is completed */
	wmb();
}

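/*
 * Each vMPM bank packs 32 pins per 32-bit word: word 'pin / 32',
 * bit 'pin % 32'.
 */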
static void qcom_mpm_enable_irq(struct irq_data *d, bool en)
{
	struct qcom_mpm_priv *priv = d->chip_data;
	int pin = d->hwirq;
	unsigned int index = pin / 32;
	unsigned int shift = pin % 32;
	unsigned long flags, val;

	raw_spin_lock_irqsave(&priv->lock, flags);

	val = qcom_mpm_read(priv, MPM_REG_ENABLE, index);
	__assign_bit(shift, &val, en);
	qcom_mpm_write(priv, MPM_REG_ENABLE, index, val);

	raw_spin_unlock_irqrestore(&priv->lock, flags);
}

static void qcom_mpm_mask(struct irq_data *d)
{
	qcom_mpm_enable_irq(d, false);

	if (d->parent_data)
		irq_chip_mask_parent(d);
}

static void qcom_mpm_unmask(struct irq_data *d)
{
	qcom_mpm_enable_irq(d, true);

	if (d->parent_data)
		irq_chip_unmask_parent(d);
}

static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg,
			 unsigned int index, unsigned int shift)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&priv->lock, flags);

	val = qcom_mpm_read(priv, reg, index);
	__assign_bit(shift, &val, set);
	qcom_mpm_write(priv, reg, index, val);

	raw_spin_unlock_irqrestore(&priv->lock, flags);
}

static int qcom_mpm_set_type(struct irq_data *d, unsigned int type)
{
	struct qcom_mpm_priv *priv = d->chip_data;
	int pin = d->hwirq;
	unsigned int index = pin / 32;
	unsigned int shift = pin % 32;

	if (type & IRQ_TYPE_EDGE_RISING)
		mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift);

	if (type & IRQ_TYPE_EDGE_FALLING)
		mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift);

	if (type & IRQ_TYPE_LEVEL_HIGH)
		mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift);

	if (!d->parent_data)
		return 0;

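	/*
	 * Collapse the type for the parent: the MPM-side registers above
	 * already track both edges and the polarity, so report any edge as
	 * rising and any level as high to the parent chip.
	 */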
	if (type & IRQ_TYPE_EDGE_BOTH)
		type = IRQ_TYPE_EDGE_RISING;

	if (type & IRQ_TYPE_LEVEL_MASK)
		type = IRQ_TYPE_LEVEL_HIGH;

	return irq_chip_set_type_parent(d, type);
}

static struct irq_chip qcom_mpm_chip = {
	.name			= "mpm",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= qcom_mpm_mask,
	.irq_unmask		= qcom_mpm_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= qcom_mpm_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SKIP_SET_WAKE,
};

static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin)
{
	struct mpm_gic_map *maps = priv->maps;
	int i;

	for (i = 0; i < priv->map_cnt; i++) {
		if (maps[i].pin == pin)
			return &maps[i];
	}

	return NULL;
}

static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
			  unsigned int nr_irqs, void *data)
{
	struct qcom_mpm_priv *priv = domain->host_data;
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	struct mpm_gic_map *map;
	irq_hw_number_t pin;
	unsigned int type;
	int ret;

	ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type);
	if (ret)
		return ret;

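	/*
	 * A GPIO with no wake-capable MPM pin (GPIO_NO_WAKE_IRQ) cannot
	 * wake the system; leave its virq disconnected rather than fail.
	 */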
	if (pin == GPIO_NO_WAKE_IRQ)
		return irq_domain_disconnect_hierarchy(domain, virq);

	ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
					    &qcom_mpm_chip, priv);
	if (ret)
		return ret;

	map = get_mpm_gic_map(priv, pin);
	if (map == NULL)
		return irq_domain_disconnect_hierarchy(domain->parent, virq);

	if (type & IRQ_TYPE_EDGE_BOTH)
		type = IRQ_TYPE_EDGE_RISING;

	if (type & IRQ_TYPE_LEVEL_MASK)
		type = IRQ_TYPE_LEVEL_HIGH;

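	/* Parent is the GIC: three cells of <GIC_SPI (0), hwirq, type> */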
	parent_fwspec.fwnode = domain->parent->fwnode;
	parent_fwspec.param_count = 3;
	parent_fwspec.param[0] = 0;
	parent_fwspec.param[1] = map->hwirq;
	parent_fwspec.param[2] = type;

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops qcom_mpm_ops = {
	.alloc		= qcom_mpm_alloc,
	.free		= irq_domain_free_irqs_common,
	.translate	= irq_domain_translate_twocell,
};

/* Triggered by RPM when system resumes from deep sleep */
static irqreturn_t qcom_mpm_handler(int irq, void *dev_id)
{
	struct qcom_mpm_priv *priv = dev_id;
	unsigned long enable, pending;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	int i, j;

	for (i = 0; i < priv->reg_stride; i++) {
		raw_spin_lock_irqsave(&priv->lock, flags);
		enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i);
		pending = qcom_mpm_read(priv, MPM_REG_STATUS, i);
		pending &= enable;
		raw_spin_unlock_irqrestore(&priv->lock, flags);

		for_each_set_bit(j, &pending, 32) {
			unsigned int pin = 32 * i + j;
			struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin);
			struct irq_data *d = &desc->irq_data;

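			/*
			 * A level interrupt is still asserted in hardware
			 * and will be re-sampled by the parent; an edge
			 * latched by the MPM during sleep must be replayed
			 * as pending here.
			 */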
			if (!irqd_is_level_type(d))
				irq_set_irqchip_state(d->irq,
						IRQCHIP_STATE_PENDING, true);
			ret = IRQ_HANDLED;
		}
	}

	return ret;
}

static int mpm_pd_power_off(struct generic_pm_domain *genpd)
{
	struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv,
						  genpd);
	int i, ret;

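	/* Clear any stale wakeup status before handing vMPM over to RPM */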
	for (i = 0; i < priv->reg_stride; i++)
		qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);

	/* Notify RPM to write vMPM into HW */
	ret = mbox_send_message(priv->mbox_chan, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq)
{
	int i;

	for (i = 0; i < cnt; i++)
		if (maps[i].hwirq == hwirq)
			return true;

	return false;
}

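/*
 * For reference, an illustrative devicetree node reconstructed from the
 * properties parsed below; the address, hwirqs and pin pairs are example
 * values only, not taken from a real board. Each qcom,mpm-pin-map entry
 * is a <mpm-pin gic-spi-hwirq> pair:
 *
 *	mpm: interrupt-controller@45f01b8 {
 *		compatible = "qcom,mpm";
 *		reg = <0x45f01b8 0x1000>;
 *		interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
 *		mboxes = <&apcs_glb 1>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		qcom,mpm-pin-count = <96>;
 *		qcom,mpm-pin-map = <2 275>,
 *				   <5 296>;
 *		#power-domain-cells = <0>;
 *	};
 */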
static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	struct device *dev = &pdev->dev;
	struct irq_domain *parent_domain;
	struct generic_pm_domain *genpd;
	struct qcom_mpm_priv *priv;
	unsigned int pin_cnt;
	int i, irq;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt);
	if (ret) {
		dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret);
		return ret;
	}

	priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32);

	ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map");
	if (ret < 0) {
		dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret);
		return ret;
	}

	if (ret % 2) {
		dev_err(dev, "invalid qcom,mpm-pin-map\n");
		return -EINVAL;
	}

	priv->map_cnt = ret / 2;
	priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps),
				  GFP_KERNEL);
	if (!priv->maps)
		return -ENOMEM;

	for (i = 0; i < priv->map_cnt; i++) {
		u32 pin, hwirq;

		of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin);
		of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq);

		if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) {
			dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n",
				 pin, hwirq);
			continue;
		}

		priv->maps[i].pin = pin;
		priv->maps[i].hwirq = hwirq;
	}

	raw_spin_lock_init(&priv->lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	for (i = 0; i < priv->reg_stride; i++) {
		qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
		qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0);
		qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0);
		qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0);
		qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	genpd = &priv->genpd;
	genpd->flags = GENPD_FLAG_IRQ_SAFE;
	genpd->power_off = mpm_pd_power_off;

	genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev));
	if (!genpd->name)
		return -ENOMEM;

	ret = pm_genpd_init(genpd, NULL, false);
	if (ret) {
		dev_err(dev, "failed to init genpd: %d\n", ret);
		return ret;
	}

	ret = of_genpd_add_provider_simple(np, genpd);
	if (ret) {
		dev_err(dev, "failed to add genpd provider: %d\n", ret);
		goto remove_genpd;
	}

	priv->mbox_client.dev = dev;
	priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0);
	if (IS_ERR(priv->mbox_chan)) {
		ret = PTR_ERR(priv->mbox_chan);
		dev_err(dev, "failed to acquire IPC channel: %d\n", ret);
		goto remove_genpd;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		dev_err(dev, "failed to find MPM parent domain\n");
		ret = -ENXIO;
		goto free_mbox;
	}

	priv->domain = irq_domain_create_hierarchy(parent_domain,
				IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt,
				of_node_to_fwnode(np), &qcom_mpm_ops, priv);
	if (!priv->domain) {
		dev_err(dev, "failed to create MPM domain\n");
		ret = -ENOMEM;
		goto free_mbox;
	}

	irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP);

	ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND,
			       "qcom_mpm", priv);
	if (ret) {
		dev_err(dev, "failed to request irq: %d\n", ret);
		goto remove_domain;
	}

	return 0;

remove_domain:
	irq_domain_remove(priv->domain);
free_mbox:
	mbox_free_channel(priv->mbox_chan);
remove_genpd:
	pm_genpd_remove(genpd);
	return ret;
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm)
IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init)
IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm)
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager");
MODULE_LICENSE("GPL v2");