xref: /openbmc/linux/arch/ia64/kernel/msi_ia64.c (revision ba61bb17)
// SPDX-License-Identifier: GPL-2.0
/*
 * MSI hooks for the IA-64 platform, modeled on the standard x86 APIC
 * MSI support.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/msidef.h>

static struct irq_chip	ia64_msi_chip;

#ifdef CONFIG_SMP
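/*
 * Retarget an MSI at another CPU: pick the first online CPU in the
 * requested mask, let the IA-64 vector-migration code prepare the
 * move, then patch the cached MSI message (destination ID and vector)
 * and write it back to the device before recording the new affinity.
 */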
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
				     const cpumask_t *cpu_mask, bool force)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	unsigned int irq = idata->irq;

	if (irq_prepare_move(irq, cpu))
		return -1;

	__get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);

	addr = msg.address_lo;
	addr &= MSI_ADDR_DEST_ID_MASK;
	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	pci_write_msi_msg(irq, &msg);
	cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));

	return 0;
}
#endif /* CONFIG_SMP */

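/*
 * Allocate an IRQ/vector for one MSI entry, bind the msi_desc to it,
 * compose an edge-triggered, fixed-delivery message aimed at an online
 * CPU in the IRQ's domain, program the device, and install
 * ia64_msi_chip with the edge flow handler.
 */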
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct msi_msg	msg;
	unsigned long	dest_phys_id;
	int	irq, vector;

	irq = create_irq();
	if (irq < 0)
		return irq;

	irq_set_msi_desc(irq, desc);
	dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
						       cpu_online_mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}

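/* Release the IRQ/vector that was allocated for an MSI entry. */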
void ia64_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

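/*
 * Ack an MSI: finish any pending vector migration for this IRQ,
 * perform a deferred affinity change if one is queued, then issue the
 * EOI on the local CPU.
 */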
static void ia64_ack_msi_irq(struct irq_data *data)
{
	irq_complete_move(data->irq);
	irq_move_irq(data);
	ia64_eoi();
}

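/*
 * Retrigger an MSI in software by resending its vector; returning 1
 * tells the generic IRQ code the retrigger has been handled here.
 */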
static int ia64_msi_retrigger_irq(struct irq_data *data)
{
	unsigned int vector = irq_to_vector(data->irq);

	ia64_resend_irq(vector);

	return 1;
}

/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
	.name			= "PCI-MSI",
	.irq_mask		= pci_msi_mask_irq,
	.irq_unmask		= pci_msi_unmask_irq,
	.irq_ack		= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ia64_set_msi_irq_affinity,
#endif
	.irq_retrigger		= ia64_msi_retrigger_irq,
};

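/*
 * arch hooks called by the PCI/MSI core: defer to the platform machine
 * vector when it provides its own MSI setup, otherwise fall back to
 * the generic IA-64 implementation above.
 */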
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	if (platform_setup_msi_irq)
		return platform_setup_msi_irq(pdev, desc);

	return ia64_setup_msi_irq(pdev, desc);
}

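/* Teardown counterpart: platform hook first, generic path otherwise. */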
void arch_teardown_msi_irq(unsigned int irq)
{
	if (platform_teardown_msi_irq)
		return platform_teardown_msi_irq(irq);

	return ia64_teardown_msi_irq(irq);
}

#ifdef CONFIG_INTEL_IOMMU
#ifdef CONFIG_SMP
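/*
 * Move the DMAR (IOMMU) fault interrupt to another CPU: read the
 * current message from the DMAR unit, update its vector and
 * destination ID, then write it back and record the new affinity.
 */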
static int dmar_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	unsigned int irq = data->irq;
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irq_prepare_move(irq, cpu))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(irq_data_get_affinity_mask(data), mask);

	return 0;
}
#endif /* CONFIG_SMP */

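/*
 * irq_chip for the DMAR fault interrupt: mask/unmask go through the
 * dmar_msi_* helpers, while ack and retrigger reuse the generic IA-64
 * MSI handlers above.
 */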
static struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.irq_unmask = dmar_msi_unmask,
	.irq_mask = dmar_msi_mask,
	.irq_ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = dmar_msi_set_affinity,
#endif
	.irq_retrigger = ia64_msi_retrigger_irq,
};

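/*
 * Build an MSI message for @irq: physical destination mode, directed
 * at one online CPU from the IRQ's domain, edge-triggered with fixed
 * delivery of the vector assigned in irq_cfg.  @pdev is unused here.
 */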
static void
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned dest;

	dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
						 cpu_online_mask));

	msg->address_hi = 0;
	msg->address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest);

	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(cfg->vector);
}

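/*
 * Allocate and program the DMAR fault interrupt: create an IRQ, stash
 * the caller's cookie as handler data, install dmar_msi_type with the
 * edge flow handler, and write the composed message to the DMAR unit.
 * The result of create_irq() is returned unchanged, so anything <= 0
 * means the allocation failed; @id and @node are unused on IA-64.
 */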
int dmar_alloc_hwirq(int id, int node, void *arg)
{
	int irq;
	struct msi_msg msg;

	irq = create_irq();
	if (irq > 0) {
		irq_set_handler_data(irq, arg);
		irq_set_chip_and_handler_name(irq, &dmar_msi_type,
					      handle_edge_irq, "edge");
		msi_compose_msg(NULL, irq, &msg);
		dmar_msi_write(irq, &msg);
	}

	return irq;
}

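/* Clear the handler data and free the DMAR fault IRQ again. */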
void dmar_free_hwirq(int irq)
{
	irq_set_handler_data(irq, NULL);
	destroy_irq(irq);
}
#endif /* CONFIG_INTEL_IOMMU */