xref: /openbmc/linux/arch/sparc/kernel/pci_msi.c (revision d0b73b48)
/* pci_msi.c: Sparc64 MSI support common layer.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include "pci_impl.h"

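/*
 * Interrupt handler for one MSI event queue (MSIQ).  The hardware
 * advances a head pointer as MSIs arrive; we drain every pending
 * entry, dispatch each MSI to its Linux irq via the per-PBM
 * msi_irq_table, then write the updated head pointer back.
 */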
static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
{
	struct sparc64_msiq_cookie *msiq_cookie = cookie;
	struct pci_pbm_info *pbm = msiq_cookie->pbm;
	unsigned long msiqid = msiq_cookie->msiqid;
	const struct sparc64_msiq_ops *ops;
	unsigned long orig_head, head;
	int err;

	ops = pbm->msi_ops;

	err = ops->get_head(pbm, msiqid, &head);
	if (unlikely(err < 0))
		goto err_get_head;

	orig_head = head;
	for (;;) {
		unsigned long msi;

		err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
		if (likely(err > 0)) {
			unsigned int irq;

			irq = pbm->msi_irq_table[msi - pbm->msi_first];
			generic_handle_irq(irq);
		}

		if (unlikely(err < 0))
			goto err_dequeue;

		if (err == 0)
			break;
	}
	if (likely(head != orig_head)) {
		err = ops->set_head(pbm, msiqid, head);
		if (unlikely(err < 0))
			goto err_set_head;
	}
	return IRQ_HANDLED;

err_get_head:
	printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
	       msiqid, err);
	goto err_out;

err_dequeue:
	printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_set_head:
	printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_out:
	return IRQ_NONE;
}

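/*
 * Spread MSIs across the PBM's event queues with a simple round-robin
 * rotor, serialized by a global spinlock.
 */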
static u32 pick_msiq(struct pci_pbm_info *pbm)
{
	static DEFINE_SPINLOCK(rotor_lock);
	unsigned long flags;
	u32 ret, rotor;

	spin_lock_irqsave(&rotor_lock, flags);

	rotor = pbm->msiq_rotor;
	ret = pbm->msiq_first + rotor;

	if (++rotor >= pbm->msiq_num)
		rotor = 0;
	pbm->msiq_rotor = rotor;

	spin_unlock_irqrestore(&rotor_lock, flags);

	return ret;
}

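/*
 * Claim the first free bit in the PBM's MSI bitmap and return the
 * corresponding absolute MSI number (offset by msi_first), or -ENOENT
 * if every MSI is in use.
 */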
static int alloc_msi(struct pci_pbm_info *pbm)
{
	int i;

	for (i = 0; i < pbm->msi_num; i++) {
		if (!test_and_set_bit(i, pbm->msi_bitmap))
			return i + pbm->msi_first;
	}

	return -ENOENT;
}

static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
	msi_num -= pbm->msi_first;
	clear_bit(msi_num, pbm->msi_bitmap);
}

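/*
 * irq_chip used for the per-device MSI interrupts; mask/unmask go
 * through the generic PCI MSI helpers.
 */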
static struct irq_chip msi_irq = {
	.name		= "PCI-MSI",
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_enable	= unmask_msi_irq,
	.irq_disable	= mask_msi_irq,
	/* XXX affinity XXX */
};

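/*
 * Set up one MSI for a device: allocate a Linux irq and an MSI number,
 * pick an event queue, program the hardware mapping via ops->msi_setup,
 * record the irq in msi_irq_table, and compose the MSI message using
 * the PBM's 32-bit or 64-bit MSI address window with the MSI number as
 * the message data.
 */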
static int sparc64_setup_msi_irq(unsigned int *irq_p,
				 struct pci_dev *pdev,
				 struct msi_desc *entry)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	struct msi_msg msg;
	int msi, err;
	u32 msiqid;

	*irq_p = irq_alloc(0, 0);
	err = -ENOMEM;
	if (!*irq_p)
		goto out_err;

	irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq,
				      "MSI");

	err = alloc_msi(pbm);
	if (unlikely(err < 0))
		goto out_irq_free;

	msi = err;

	msiqid = pick_msiq(pbm);

	err = ops->msi_setup(pbm, msiqid, msi,
			     (entry->msi_attrib.is_64 ? 1 : 0));
	if (err)
		goto out_msi_free;

	pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;

	if (entry->msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi;

	irq_set_msi_desc(*irq_p, entry);
	write_msi_msg(*irq_p, &msg);

	return 0;

out_msi_free:
	free_msi(pbm, msi);

out_irq_free:
	irq_set_chip(*irq_p, NULL);
	irq_free(*irq_p);
	*irq_p = 0;

out_err:
	return err;
}

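/*
 * Undo sparc64_setup_msi_irq(): find which MSI is bound to this irq,
 * clear the table entry, tear down the hardware mapping, and release
 * both the MSI number and the Linux irq.
 */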
static void sparc64_teardown_msi_irq(unsigned int irq,
				     struct pci_dev *pdev)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	unsigned int msi_num;
	int i, err;

	for (i = 0; i < pbm->msi_num; i++) {
		if (pbm->msi_irq_table[i] == irq)
			break;
	}
	if (i >= pbm->msi_num) {
		printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
		       pbm->name, irq);
		return;
	}

	msi_num = pbm->msi_first + i;
	pbm->msi_irq_table[i] = ~0U;

	err = ops->msi_teardown(pbm, msi_num);
	if (err) {
		printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
		       "irq %u, gives error %d\n",
		       pbm->name, msi_num, irq, err);
		return;
	}

	free_msi(pbm, msi_num);

	irq_set_chip(irq, NULL);
	irq_free(irq);
}

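/*
 * Allocate the MSI allocation bitmap: one bit per MSI, rounded up to a
 * whole number of unsigned longs.
 */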
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
	unsigned long size, bits_per_ulong;

	bits_per_ulong = sizeof(unsigned long) * 8;
	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;
	BUG_ON(size % sizeof(unsigned long));

	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_bitmap)
		return -ENOMEM;

	return 0;
}

static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msi_bitmap);
	pbm->msi_bitmap = NULL;
}

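/*
 * Allocate one sparc64_msiq_cookie per event queue (handed to the
 * queue interrupt handler) plus the MSI-to-Linux-irq lookup table with
 * one entry per MSI.
 */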
static int msi_table_alloc(struct pci_pbm_info *pbm)
{
	int size, i;

	size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
	pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
	if (!pbm->msiq_irq_cookies)
		return -ENOMEM;

	for (i = 0; i < pbm->msiq_num; i++) {
		struct sparc64_msiq_cookie *p;

		p = &pbm->msiq_irq_cookies[i];
		p->pbm = pbm;
		p->msiqid = pbm->msiq_first + i;
	}

	size = pbm->msi_num * sizeof(unsigned int);
	pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_irq_table) {
		kfree(pbm->msiq_irq_cookies);
		pbm->msiq_irq_cookies = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void msi_table_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msiq_irq_cookies);
	pbm->msiq_irq_cookies = NULL;

	kfree(pbm->msi_irq_table);
	pbm->msi_irq_table = NULL;
}

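/*
 * Build the Linux irq for one event queue's device interrupt number,
 * bind it to the PBM's NUMA node when one is known, and install
 * sparc64_msiq_interrupt() as its handler with the per-queue cookie.
 */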
static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
				 const struct sparc64_msiq_ops *ops,
				 unsigned long msiqid,
				 unsigned long devino)
{
	int irq = ops->msiq_build_irq(pbm, msiqid, devino);
	int err, nid;

	if (irq < 0)
		return irq;

	nid = pbm->numa_node;
	if (nid != -1) {
		cpumask_t numa_mask;

		cpumask_copy(&numa_mask, cpumask_of_node(nid));
		irq_set_affinity(irq, &numa_mask);
	}
	err = request_irq(irq, sparc64_msiq_interrupt, 0,
			  "MSIQ",
			  &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
	if (err)
		return err;

	return 0;
}

static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
				      const struct sparc64_msiq_ops *ops)
{
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = i + pbm->msiq_first;
		unsigned long devino = i + pbm->msiq_first_devino;
		int err;

		err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
		if (err)
			return err;
	}

	return 0;
}

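/*
 * Probe the firmware properties describing this PBM's MSI support
 * (#msi-eqs, msi-eq-size, msi-eq-to-devino/msi-eq-devino, #msi,
 * msi-ranges, msi-data-mask, msix-data-width, msi-address-ranges),
 * allocate the bitmap, tables and event queues, bring the queues up,
 * and install the MSI setup/teardown hooks.  On any failure the PBM
 * is left with MSI support disabled.
 */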
void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
			  const struct sparc64_msiq_ops *ops)
{
	const u32 *val;
	int len;

	val = of_get_property(pbm->op->dev.of_node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		const struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		const struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		const struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->op->dev.of_node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		mqp = of_get_property(pbm->op->dev.of_node,
				      "msi-eq-to-devino", &len);
		if (!mqp)
			mqp = of_get_property(pbm->op->dev.of_node,
					      "msi-eq-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->op->dev.of_node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->op->dev.of_node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->op->dev.of_node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->op->dev.of_node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->op->dev.of_node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_table_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (ops->msiq_alloc(pbm)) {
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (sparc64_bringup_msi_queues(pbm, ops)) {
			ops->msiq_free(pbm);
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%llx:0x%x] "
		       "addr64[0x%llx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
		       pbm->name,
		       __pa(pbm->msi_queues));

		pbm->msi_ops = ops;
		pbm->setup_msi_irq = sparc64_setup_msi_irq;
		pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
	}
	return;

no_msi:
	pbm->msiq_num = 0;
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}