xref: /openbmc/linux/drivers/irqchip/irq-imx-mu-msi.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Freescale MU used as MSI controller
 *
 * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
 * Copyright 2022 NXP
 *	Frank Li <Frank.Li@nxp.com>
 *	Peng Fan <peng.fan@nxp.com>
 *
 * Based on drivers/mailbox/imx-mailbox.c
 */

#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/spinlock.h>

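/* The MU exposes four receive registers; each one backs a single MSI vector. */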
#define IMX_MU_CHANS            4

enum imx_mu_xcr {
	IMX_MU_GIER,
	IMX_MU_GCR,
	IMX_MU_TCR,
	IMX_MU_RCR,
	IMX_MU_xCR_MAX,
};

enum imx_mu_xsr {
	IMX_MU_SR,
	IMX_MU_GSR,
	IMX_MU_TSR,
	IMX_MU_RSR,
	IMX_MU_xSR_MAX
};

enum imx_mu_type {
	IMX_MU_V2 = BIT(1),
};

/*
 * Receive Interrupt Enable (xCR) and Receive Flag (xSR) bits for channel x.
 * On MU V2 (i.MX8ULP) channel x maps straight to bit x; on earlier MUs the
 * per-channel bits sit at bits 27..24, in reversed channel order.
 */
#define IMX_MU_xCR_RIEn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
#define IMX_MU_xSR_RFn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))

struct imx_mu_dcfg {
	enum imx_mu_type type;
	u32     xTR;            /* Transmit Register0 */
	u32     xRR;            /* Receive Register0 */
	u32     xSR[IMX_MU_xSR_MAX];         /* Status Registers */
	u32     xCR[IMX_MU_xCR_MAX];         /* Control Registers */
};

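/*
 * struct imx_mu_msi - per-instance state
 * @lock:	protects @used and the read-modify-write of the control register
 * @msi_domain:	platform-MSI domain stacked on the parent linear domain
 * @regs:	mapped "processor-a-side" registers of the MU
 * @msiir_addr:	physical address of the peer-side transmit register bank,
 *		used as the MSI doorbell target
 * @cfg:	register layout for this MU revision
 * @used:	bitmap of allocated channels (MSI vectors)
 * @clk:	MU peripheral clock, gated via runtime PM
 */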
struct imx_mu_msi {
	raw_spinlock_t			lock;
	struct irq_domain		*msi_domain;
	void __iomem			*regs;
	phys_addr_t			msiir_addr;
	const struct imx_mu_dcfg	*cfg;
	unsigned long			used;
	struct clk			*clk;
};

static void imx_mu_write(struct imx_mu_msi *msi_data, u32 val, u32 offs)
{
	iowrite32(val, msi_data->regs + offs);
}

static u32 imx_mu_read(struct imx_mu_msi *msi_data, u32 offs)
{
	return ioread32(msi_data->regs + offs);
}

static u32 imx_mu_xcr_rmw(struct imx_mu_msi *msi_data, enum imx_mu_xcr type, u32 set, u32 clr)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&msi_data->lock, flags);
	val = imx_mu_read(msi_data, msi_data->cfg->xCR[type]);
	val &= ~clr;
	val |= set;
	imx_mu_write(msi_data, val, msi_data->cfg->xCR[type]);
	raw_spin_unlock_irqrestore(&msi_data->lock, flags);

	return val;
}

static void imx_mu_msi_parent_mask_irq(struct irq_data *data)
{
	struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);

	imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(msi_data, data->hwirq));
}

static void imx_mu_msi_parent_unmask_irq(struct irq_data *data)
{
	struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);

	imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, IMX_MU_xCR_RIEn(msi_data, data->hwirq), 0);
}

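/*
 * Reading the channel's receive register acks the interrupt: on the MU,
 * the read is what clears the corresponding RFn status bit. The payload
 * itself is discarded; only the doorbell effect is used.
 */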
static void imx_mu_msi_parent_ack_irq(struct irq_data *data)
{
	struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);

	imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4);
}

static struct irq_chip imx_mu_msi_irq_chip = {
	.name = "MU-MSI",
	.irq_ack = irq_chip_ack_parent,
};

static struct msi_domain_ops imx_mu_msi_irq_ops = {
};

static struct msi_domain_info imx_mu_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &imx_mu_msi_irq_ops,
	.chip	= &imx_mu_msi_irq_chip,
};

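/*
 * The MSI "address" handed to clients is the physical address of the
 * peer-side transmit register for this channel, and the payload is the
 * hwirq number. A 32-bit write from the other processor to that address
 * lands in our receive register and raises the channel interrupt.
 */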
static void imx_mu_msi_parent_compose_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data);
	u64 addr = msi_data->msiir_addr + 4 * data->hwirq;

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);
	msg->data = data->hwirq;
}

static int imx_mu_msi_parent_set_affinity(struct irq_data *irq_data,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip imx_mu_msi_parent_chip = {
	.name		= "MU",
	.irq_mask	= imx_mu_msi_parent_mask_irq,
	.irq_unmask	= imx_mu_msi_parent_unmask_irq,
	.irq_ack	= imx_mu_msi_parent_ack_irq,
	.irq_compose_msi_msg	= imx_mu_msi_parent_compose_msg,
	.irq_set_affinity = imx_mu_msi_parent_set_affinity,
};

static int imx_mu_msi_domain_irq_alloc(struct irq_domain *domain,
					unsigned int virq,
					unsigned int nr_irqs,
					void *args)
{
	struct imx_mu_msi *msi_data = domain->host_data;
	unsigned long flags;
	int pos, err = 0;

	WARN_ON(nr_irqs != 1);

	raw_spin_lock_irqsave(&msi_data->lock, flags);
	pos = find_first_zero_bit(&msi_data->used, IMX_MU_CHANS);
	if (pos < IMX_MU_CHANS)
		__set_bit(pos, &msi_data->used);
	else
		err = -ENOSPC;
	raw_spin_unlock_irqrestore(&msi_data->lock, flags);

	if (err)
		return err;

	irq_domain_set_info(domain, virq, pos,
			    &imx_mu_msi_parent_chip, msi_data,
			    handle_edge_irq, NULL, NULL);
	return 0;
}

static void imx_mu_msi_domain_irq_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&msi_data->lock, flags);
	__clear_bit(d->hwirq, &msi_data->used);
	raw_spin_unlock_irqrestore(&msi_data->lock, flags);
}

static const struct irq_domain_ops imx_mu_msi_domain_ops = {
	.alloc	= imx_mu_msi_domain_irq_alloc,
	.free	= imx_mu_msi_domain_irq_free,
};

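/*
 * Chained handler for the MU's upstream interrupt: snapshot the receive
 * status register once and fan each pending channel out to the MSI domain.
 */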
static void imx_mu_msi_irq_handler(struct irq_desc *desc)
{
	struct imx_mu_msi *msi_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 status;
	int i;

	status = imx_mu_read(msi_data, msi_data->cfg->xSR[IMX_MU_RSR]);

	chained_irq_enter(chip, desc);
	for (i = 0; i < IMX_MU_CHANS; i++) {
		if (status & IMX_MU_xSR_RFn(msi_data, i))
			generic_handle_domain_irq(msi_data->msi_domain, i);
	}
	chained_irq_exit(chip, desc);
}

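/*
 * Two-level setup: a linear parent domain with one hwirq per MU channel,
 * plus a platform-MSI domain stacked on top of it so client devices can
 * allocate message-based interrupts against this MU.
 */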
static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev)
{
	struct fwnode_handle *fwnodes = dev_fwnode(dev);
	struct irq_domain *parent;

	/* Initialize MSI domain parent */
	parent = irq_domain_create_linear(fwnodes,
					    IMX_MU_CHANS,
					    &imx_mu_msi_domain_ops,
					    msi_data);
	if (!parent) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	msi_data->msi_domain = platform_msi_create_irq_domain(fwnodes,
					&imx_mu_msi_domain_info,
					parent);

	if (!msi_data->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	irq_domain_set_pm_device(msi_data->msi_domain, dev);

	return 0;
}
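
/*
 * Usage sketch (not part of this driver): a hypothetical client device that
 * points its "msi-parent" at this MU could request a vector through the
 * generic platform-MSI API, roughly along these lines (the client callback
 * and variable names are made up for illustration):
 *
 *	static void client_write_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		// program msg->address_hi/lo and msg->data into the
 *		// peripheral that will issue the 32-bit doorbell write
 *	}
 *
 *	err = platform_msi_domain_alloc_irqs(dev, 1, client_write_msg);
 *	if (!err)
 *		virq = msi_get_virq(dev, 0);
 *
 * The composed message targets one of the peer-side transmit registers, so
 * the write surfaces here as a channel receive interrupt.
 */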

/*
 * Register offsets for the different MU IP versions. Pre-V2 MUs (i.MX6SX,
 * i.MX7ULP) have a single combined status/control register, so all xSR/xCR
 * entries share one offset; MU V2 on i.MX8ULP splits them per function.
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
	.type	= 0,
	.xTR    = 0x0,
	.xRR    = 0x10,
	.xSR    = {
			[IMX_MU_SR]  = 0x20,
			[IMX_MU_GSR] = 0x20,
			[IMX_MU_TSR] = 0x20,
			[IMX_MU_RSR] = 0x20,
		  },
	.xCR    = {
			[IMX_MU_GIER] = 0x24,
			[IMX_MU_GCR]  = 0x24,
			[IMX_MU_TCR]  = 0x24,
			[IMX_MU_RCR]  = 0x24,
		  },
};

static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
	.type	= 0,
	.xTR    = 0x20,
	.xRR    = 0x40,
	.xSR    = {
			[IMX_MU_SR]  = 0x60,
			[IMX_MU_GSR] = 0x60,
			[IMX_MU_TSR] = 0x60,
			[IMX_MU_RSR] = 0x60,
		  },
	.xCR    = {
			[IMX_MU_GIER] = 0x64,
			[IMX_MU_GCR]  = 0x64,
			[IMX_MU_TCR]  = 0x64,
			[IMX_MU_RCR]  = 0x64,
		  },
};

static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
	.type   = IMX_MU_V2,
	.xTR    = 0x200,
	.xRR    = 0x280,
	.xSR    = {
			[IMX_MU_SR]  = 0xC,
			[IMX_MU_GSR] = 0x118,
			[IMX_MU_TSR] = 0x124,
			[IMX_MU_RSR] = 0x12C,
		  },
	.xCR    = {
			[IMX_MU_GIER] = 0x110,
			[IMX_MU_GCR]  = 0x114,
			[IMX_MU_TCR]  = 0x120,
			[IMX_MU_RCR]  = 0x128
		  },
};

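/*
 * Probe path: map the A-side register block, remember the physical address
 * of the B-side transmit registers as the MSI target, attach both power
 * domains, build the IRQ/MSI domains and finally install the chained
 * handler on the MU interrupt.
 */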
static int __init imx_mu_of_init(struct device_node *dn,
				 struct device_node *parent,
				 const struct imx_mu_dcfg *cfg)
{
	struct platform_device *pdev = of_find_device_by_node(dn);
	struct device_link *pd_link_a;
	struct device_link *pd_link_b;
	struct imx_mu_msi *msi_data;
	struct resource *res;
	struct device *pd_a;
	struct device *pd_b;
	struct device *dev;
	int ret;
	int irq;

	dev = &pdev->dev;

	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;

	msi_data->cfg = cfg;

	msi_data->regs = devm_platform_ioremap_resource_byname(pdev, "processor-a-side");
	if (IS_ERR(msi_data->regs)) {
		dev_err(&pdev->dev, "failed to initialize 'regs'\n");
		return PTR_ERR(msi_data->regs);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "processor-b-side");
	if (!res)
		return -EIO;

	msi_data->msiir_addr = res->start + msi_data->cfg->xTR;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	platform_set_drvdata(pdev, msi_data);

	msi_data->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(msi_data->clk))
		return PTR_ERR(msi_data->clk);

	pd_a = dev_pm_domain_attach_by_name(dev, "processor-a-side");
	if (IS_ERR(pd_a))
		return PTR_ERR(pd_a);

	pd_b = dev_pm_domain_attach_by_name(dev, "processor-b-side");
	if (IS_ERR(pd_b))
		return PTR_ERR(pd_b);

	pd_link_a = device_link_add(dev, pd_a,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);

	if (!pd_link_a) {
		dev_err(dev, "Failed to add device_link to mu a.\n");
		goto err_pd_a;
	}

	pd_link_b = device_link_add(dev, pd_b,
			DL_FLAG_STATELESS |
			DL_FLAG_PM_RUNTIME |
			DL_FLAG_RPM_ACTIVE);

	if (!pd_link_b) {
		dev_err(dev, "Failed to add device_link to mu b.\n");
		goto err_pd_b;
	}

	ret = imx_mu_msi_domains_init(msi_data, dev);
	if (ret)
		goto err_dm_init;

	pm_runtime_enable(dev);

	irq_set_chained_handler_and_data(irq,
					 imx_mu_msi_irq_handler,
					 msi_data);

	return 0;

err_dm_init:
	device_link_remove(dev, pd_b);
err_pd_b:
	device_link_remove(dev, pd_a);
err_pd_a:
	return -EINVAL;
}

static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
{
	struct imx_mu_msi *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
{
	struct imx_mu_msi *priv = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		dev_err(dev, "failed to enable clock\n");

	return ret;
}

static const struct dev_pm_ops imx_mu_pm_ops = {
	SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
			   imx_mu_runtime_resume, NULL)
};

static int __init imx_mu_imx7ulp_of_init(struct device_node *dn,
					 struct device_node *parent)
{
	return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx7ulp);
}

static int __init imx_mu_imx6sx_of_init(struct device_node *dn,
					struct device_node *parent)
{
	return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx6sx);
}

static int __init imx_mu_imx8ulp_of_init(struct device_node *dn,
					 struct device_node *parent)
{
	return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx8ulp);
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(imx_mu_msi)
IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_of_init)
IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_of_init)
IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_of_init)
IRQCHIP_PLATFORM_DRIVER_END(imx_mu_msi, .pm = &imx_mu_pm_ops)

MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
MODULE_DESCRIPTION("Freescale MU MSI controller driver");
MODULE_LICENSE("GPL");