/* pci_msi.c: Sparc64 MSI support common layer.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include "pci_impl.h"

/* Drain one MSI event queue: dispatch every pending entry to its
 * virtual IRQ, then push the updated head index back to the hardware.
 */
static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
{
	struct sparc64_msiq_cookie *msiq_cookie = cookie;
	struct pci_pbm_info *pbm = msiq_cookie->pbm;
	unsigned long msiqid = msiq_cookie->msiqid;
	const struct sparc64_msiq_ops *ops;
	unsigned long orig_head, head;
	int err;

	ops = pbm->msi_ops;

	err = ops->get_head(pbm, msiqid, &head);
	if (unlikely(err < 0))
		goto err_get_head;

	orig_head = head;
	for (;;) {
		unsigned long msi;

		err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
		if (likely(err > 0)) {
			struct irq_desc *desc;
			unsigned int virt_irq;

			virt_irq = pbm->msi_irq_table[msi - pbm->msi_first];
			desc = irq_desc + virt_irq;

			desc->handle_irq(virt_irq, desc);
		}

		if (unlikely(err < 0))
			goto err_dequeue;

		if (err == 0)
			break;
	}
	if (likely(head != orig_head)) {
		err = ops->set_head(pbm, msiqid, head);
		if (unlikely(err < 0))
			goto err_set_head;
	}
	return IRQ_HANDLED;

err_get_head:
	printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
	       msiqid, err);
	goto err_out;

err_dequeue:
	printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_set_head:
	printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_out:
	return IRQ_NONE;
}

/* Spread MSIs across the available event queues round-robin. */
static u32 pick_msiq(struct pci_pbm_info *pbm)
{
	static DEFINE_SPINLOCK(rotor_lock);
	unsigned long flags;
	u32 ret, rotor;

	spin_lock_irqsave(&rotor_lock, flags);

	rotor = pbm->msiq_rotor;
	ret = pbm->msiq_first + rotor;

	if (++rotor >= pbm->msiq_num)
		rotor = 0;
	pbm->msiq_rotor = rotor;

	spin_unlock_irqrestore(&rotor_lock, flags);

	return ret;
}

/* Grab the first free MSI number from the PBM's allocation bitmap. */
static int alloc_msi(struct pci_pbm_info *pbm)
{
	int i;

	for (i = 0; i < pbm->msi_num; i++) {
		if (!test_and_set_bit(i, pbm->msi_bitmap))
			return i + pbm->msi_first;
	}

	return -ENOENT;
}

static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
	msi_num -= pbm->msi_first;
	clear_bit(msi_num, pbm->msi_bitmap);
}

static struct irq_chip msi_irq = {
	.typename	= "PCI-MSI",
	.mask		= mask_msi_irq,
	.unmask		= unmask_msi_irq,
	.enable		= unmask_msi_irq,
	.disable	= mask_msi_irq,
	/* XXX affinity XXX */
};

static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
				 struct pci_dev *pdev,
				 struct msi_desc *entry)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	struct msi_msg msg;
	int msi, err;
	u32 msiqid;

	*virt_irq_p = virt_irq_alloc(0, 0);
	err = -ENOMEM;
	if (!*virt_irq_p)
		goto out_err;

	set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq,
				      handle_simple_irq, "MSI");

	err = alloc_msi(pbm);
	if (unlikely(err < 0))
		goto out_virt_irq_free;

	msi = err;

	msiqid = pick_msiq(pbm);

	err = ops->msi_setup(pbm, msiqid, msi,
			     (entry->msi_attrib.is_64 ?
			      1 : 0));
	if (err)
		goto out_msi_free;

	pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;

	if (entry->msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi;

	set_irq_msi(*virt_irq_p, entry);
	write_msi_msg(*virt_irq_p, &msg);

	return 0;

out_msi_free:
	free_msi(pbm, msi);

out_virt_irq_free:
	set_irq_chip(*virt_irq_p, NULL);
	virt_irq_free(*virt_irq_p);
	*virt_irq_p = 0;

out_err:
	return err;
}

static void sparc64_teardown_msi_irq(unsigned int virt_irq,
				     struct pci_dev *pdev)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	unsigned int msi_num;
	int i, err;

	for (i = 0; i < pbm->msi_num; i++) {
		if (pbm->msi_irq_table[i] == virt_irq)
			break;
	}
	if (i >= pbm->msi_num) {
		printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
		       pbm->name, virt_irq);
		return;
	}

	msi_num = pbm->msi_first + i;
	pbm->msi_irq_table[i] = ~0U;

	err = ops->msi_teardown(pbm, msi_num);
	if (err) {
		printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
		       "irq %u, gives error %d\n",
		       pbm->name, msi_num, virt_irq, err);
		return;
	}

	free_msi(pbm, msi_num);

	set_irq_chip(virt_irq, NULL);
	virt_irq_free(virt_irq);
}

/* The bitmap covers msi_num MSIs, rounded up to a whole number of
 * unsigned longs.
 */
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
	unsigned long size, bits_per_ulong;

	bits_per_ulong = sizeof(unsigned long) * 8;
	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;
	BUG_ON(size % sizeof(unsigned long));

	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_bitmap)
		return -ENOMEM;

	return 0;
}

static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msi_bitmap);
	pbm->msi_bitmap = NULL;
}

static int msi_table_alloc(struct pci_pbm_info *pbm)
{
	int size, i;

	size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
	pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
	if (!pbm->msiq_irq_cookies)
		return -ENOMEM;

	for (i = 0; i < pbm->msiq_num; i++) {
		struct sparc64_msiq_cookie *p;

		p = &pbm->msiq_irq_cookies[i];
		p->pbm = pbm;
		p->msiqid = pbm->msiq_first + i;
	}

	size = pbm->msi_num * sizeof(unsigned int);
	pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_irq_table) {
		kfree(pbm->msiq_irq_cookies);
		pbm->msiq_irq_cookies = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void msi_table_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msiq_irq_cookies);
	pbm->msiq_irq_cookies = NULL;

	kfree(pbm->msi_irq_table);
	pbm->msi_irq_table = NULL;
}

/* Register the interrupt for one event queue, preferring a CPU on the
 * PBM's NUMA node when one is known.
 */
static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
				 const struct sparc64_msiq_ops *ops,
				 unsigned long msiqid,
				 unsigned long devino)
{
	int irq = ops->msiq_build_irq(pbm, msiqid, devino);
	int err, nid;

	if (irq < 0)
		return irq;

	nid = pbm->numa_node;
	if (nid != -1) {
		cpumask_t numa_mask = *cpumask_of_node(nid);

		irq_set_affinity(irq, &numa_mask);
	}
	err = request_irq(irq, sparc64_msiq_interrupt, 0,
			  "MSIQ",
			  &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
	if (err)
		return err;

	return 0;
}

static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
				      const struct sparc64_msiq_ops *ops)
{
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = i + pbm->msiq_first;
		unsigned long devino = i + pbm->msiq_first_devino;
		int err;

		err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
		if (err)
			return err;
	}

	return 0;
}

void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
			  const struct sparc64_msiq_ops *ops)
{
	const u32 *val;
	int len;

	val = of_get_property(pbm->op->node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		const struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		const struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		const struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->op->node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		mqp = of_get_property(pbm->op->node,
				      "msi-eq-to-devino", &len);
		if (!mqp)
			mqp = of_get_property(pbm->op->node,
					      "msi-eq-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->op->node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->op->node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->op->node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->op->node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->op->node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_table_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (ops->msiq_alloc(pbm)) {
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (sparc64_bringup_msi_queues(pbm, ops)) {
			ops->msiq_free(pbm);
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%llx:0x%x] "
		       "addr64[0x%llx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
		       pbm->name,
		       __pa(pbm->msi_queues));

		pbm->msi_ops = ops;
		pbm->setup_msi_irq = sparc64_setup_msi_irq;
		pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
	}
	return;

no_msi:
	pbm->msiq_num = 0;
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}