Lines matching refs:dd (hfi1 driver MSI-X code, msix.c)

16 int msix_initialize(struct hfi1_devdata *dd)  in msix_initialize()  argument
30 total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts; in msix_initialize()
35 ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX); in msix_initialize()
37 dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret); in msix_initialize()
41 entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries), in msix_initialize()
44 pci_free_irq_vectors(dd->pcidev); in msix_initialize()
48 dd->msix_info.msix_entries = entries; in msix_initialize()
49 spin_lock_init(&dd->msix_info.msix_lock); in msix_initialize()
50 bitmap_zero(dd->msix_info.in_use_msix, total); in msix_initialize()
51 dd->msix_info.max_requested = total; in msix_initialize()
52 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total); in msix_initialize()
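The msix_initialize() fragments above size the vector count as one general interrupt plus the SDMA engines, kernel receive queues, and netdev contexts, then allocate exactly that many MSI-X vectors and set up the per-device tracking state. A minimal sketch of that setup path follows; the function name example_msix_setup, the GFP_KERNEL flag, and the error-path structure are assumptions filled in around the listed lines, not copied from the driver.

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/spinlock.h>

/* sketch: request exactly 'total' MSI-X vectors and prepare tracking state */
static int example_msix_setup(struct hfi1_devdata *dd, u32 total)
{
        struct hfi1_msix_entry *entries;
        int ret;

        /* min == max == total: fail outright if the full set is unavailable */
        ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
        if (ret < 0) {
                dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
                return ret;
        }

        entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
        if (!entries) {
                pci_free_irq_vectors(dd->pcidev);
                return -ENOMEM;
        }

        dd->msix_info.msix_entries = entries;
        spin_lock_init(&dd->msix_info.msix_lock);
        bitmap_zero(dd->msix_info.in_use_msix, total);
        dd->msix_info.max_requested = total;
        dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
        return 0;
}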
74 static int msix_request_irq(struct hfi1_devdata *dd, void *arg, in msix_request_irq() argument
84 spin_lock(&dd->msix_info.msix_lock); in msix_request_irq()
85 nr = find_first_zero_bit(dd->msix_info.in_use_msix, in msix_request_irq()
86 dd->msix_info.max_requested); in msix_request_irq()
87 if (nr < dd->msix_info.max_requested) in msix_request_irq()
88 __set_bit(nr, dd->msix_info.in_use_msix); in msix_request_irq()
89 spin_unlock(&dd->msix_info.msix_lock); in msix_request_irq()
91 if (nr == dd->msix_info.max_requested) in msix_request_irq()
97 irq = pci_irq_vector(dd->pcidev, nr); in msix_request_irq()
98 ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name); in msix_request_irq()
100 dd_dev_err(dd, in msix_request_irq()
103 spin_lock(&dd->msix_info.msix_lock); in msix_request_irq()
104 __clear_bit(nr, dd->msix_info.in_use_msix); in msix_request_irq()
105 spin_unlock(&dd->msix_info.msix_lock); in msix_request_irq()
113 me = &dd->msix_info.msix_entries[nr]; in msix_request_irq()
119 ret = hfi1_get_irq_affinity(dd, me); in msix_request_irq()
121 dd_dev_err(dd, "%s: unable to pin IRQ %d\n", name, ret); in msix_request_irq()
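msix_request_irq() is the shared allocation path: claim the first free slot in the in_use_msix bitmap under msix_lock, translate the slot to a Linux IRQ number, request the handler, and give the bit back if the request fails. A hedged sketch of that pattern is below; example_request_vector, the -ENOSPC return, and the exact error message are assumptions, while the bitmap and PCI calls mirror the listed lines.

#include <linux/interrupt.h>    /* irq_handler_t */

/* sketch: claim one free MSI-X slot and attach a handler to it */
static int example_request_vector(struct hfi1_devdata *dd, void *arg,
                                  irq_handler_t handler, irq_handler_t thread,
                                  const char *name)
{
        unsigned long nr;
        int irq, ret;

        spin_lock(&dd->msix_info.msix_lock);
        nr = find_first_zero_bit(dd->msix_info.in_use_msix,
                                 dd->msix_info.max_requested);
        if (nr < dd->msix_info.max_requested)
                __set_bit(nr, dd->msix_info.in_use_msix);
        spin_unlock(&dd->msix_info.msix_lock);

        if (nr == dd->msix_info.max_requested)
                return -ENOSPC;                 /* every slot is taken */

        irq = pci_irq_vector(dd->pcidev, nr);   /* slot -> Linux IRQ number */
        ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
        if (ret) {
                dd_dev_err(dd, "%s: request for IRQ %d failed: %d\n",
                           name, irq, ret);
                /* hand the slot back so a later request can reuse it */
                spin_lock(&dd->msix_info.msix_lock);
                __clear_bit(nr, dd->msix_info.in_use_msix);
                spin_unlock(&dd->msix_info.msix_lock);
                return ret;
        }
        return nr;
}

The non-atomic __set_bit()/__clear_bit() variants are safe here because every access to the bitmap happens under msix_lock.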
131 int nr = msix_request_irq(rcd->dd, rcd, handler, thread, in msix_request_rcd_irq_common()
144 remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr); in msix_request_rcd_irq_common()
159 rcd->dd->unit, rcd->ctxt); in msix_request_rcd_irq()
175 rcd->dd->unit, rcd->ctxt); in msix_netdev_request_rcd_irq()
191 sde->dd->unit, sde->this_idx); in msix_request_sdma_irq()
192 nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL, in msix_request_sdma_irq()
197 remap_sdma_interrupts(sde->dd, sde->this_idx, nr); in msix_request_sdma_irq()
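The receive-context, netdev, and SDMA request helpers in the lines above share one shape: build a per-unit, per-context name with snprintf(), call msix_request_irq() with the matching handler, then steer the hardware interrupt source at the chosen vector (remap_intr() or remap_sdma_interrupts()). The SDMA variant sketched below assumes a local name buffer, the IRQ_SDMA type value, and an sde->msix_intr field for remembering the slot; none of those appear verbatim in the listing.

/* sketch: wire one SDMA engine to its own MSI-X vector */
static int example_request_sdma_vector(struct sdma_engine *sde)
{
        char name[64];          /* buffer size is an assumption */
        int nr;

        snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
                 sde->dd->unit, sde->this_idx);
        nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
                              IRQ_SDMA, name);
        if (nr < 0)
                return nr;

        sde->msix_intr = nr;                               /* remember the slot */
        remap_sdma_interrupts(sde->dd, sde->this_idx, nr); /* steer HW source */
        return 0;
}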
207 int msix_request_general_irq(struct hfi1_devdata *dd) in msix_request_general_irq() argument
212 snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit); in msix_request_general_irq()
213 nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL, in msix_request_general_irq()
220 msix_free_irq(dd, (u8)nr); in msix_request_general_irq()
221 dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr); in msix_request_general_irq()
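msix_request_general_irq() goes through the same helper for the device-wide general interrupt and then enforces the invariant suggested by the two lines above: the general interrupt is expected to land on vector 0, and any other slot is released again and reported as an error. A short sketch, with the buffer size and the -EINVAL return assumed:

/* sketch: the general interrupt must end up on MSI-X vector 0 */
static int example_request_general_vector(struct hfi1_devdata *dd)
{
        char name[64];          /* buffer size is an assumption */
        int nr;

        snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
        nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
                              name);
        if (nr < 0)
                return nr;

        if (nr) {               /* any slot other than 0 breaks the invariant */
                msix_free_irq(dd, (u8)nr);
                dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr);
                return -EINVAL;
        }
        return 0;
}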
233 static void enable_sdma_srcs(struct hfi1_devdata *dd, int i) in enable_sdma_srcs() argument
235 set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true); in enable_sdma_srcs()
236 set_intr_bits(dd, IS_SDMA_PROGRESS_START + i, in enable_sdma_srcs()
238 set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true); in enable_sdma_srcs()
239 set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i, in enable_sdma_srcs()
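enable_sdma_srcs() enables the four interrupt sources a single SDMA engine can raise: data, progress, idle, and per-engine error. The continuation lines are cut off in the listing because they do not mention dd; a hedged reconstruction of the full calls, assuming each source occupies exactly one bit (first == last), looks like this:

/* sketch: enable the four per-engine SDMA interrupt sources for engine i */
static void example_enable_sdma_srcs(struct hfi1_devdata *dd, int i)
{
        set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
        set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
                      IS_SDMA_PROGRESS_START + i, true);
        set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
        set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
                      true);
}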
250 int msix_request_irqs(struct hfi1_devdata *dd) in msix_request_irqs() argument
253 int ret = msix_request_general_irq(dd); in msix_request_irqs()
258 for (i = 0; i < dd->num_sdma; i++) { in msix_request_irqs()
259 struct sdma_engine *sde = &dd->per_sdma[i]; in msix_request_irqs()
264 enable_sdma_srcs(sde->dd, i); in msix_request_irqs()
267 for (i = 0; i < dd->n_krcv_queues; i++) { in msix_request_irqs()
268 struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i); in msix_request_irqs()
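msix_request_irqs() is the top-level startup path: request the general interrupt first, then one vector per SDMA engine (enabling that engine's sources once its vector is wired up), then one per kernel receive queue. The sketch below keeps that ordering; context reference handling and the exact error unwinding are assumptions and are only noted in comments.

/* sketch: request every vector the driver needs at startup */
static int example_request_all(struct hfi1_devdata *dd)
{
        int ret = msix_request_general_irq(dd);
        int i;

        if (ret)
                return ret;

        for (i = 0; i < dd->num_sdma; i++) {
                struct sdma_engine *sde = &dd->per_sdma[i];

                ret = msix_request_sdma_irq(sde);
                if (ret)
                        return ret;
                /* sources are only enabled once the vector exists */
                enable_sdma_srcs(sde->dd, i);
        }

        for (i = 0; i < dd->n_krcv_queues; i++) {
                struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);

                /* reference drop on rcd is omitted in this sketch */
                if (rcd)
                        ret = msix_request_rcd_irq(rcd);
                if (ret)
                        return ret;
        }
        return 0;
}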
286 void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr) in msix_free_irq() argument
290 if (msix_intr >= dd->msix_info.max_requested) in msix_free_irq()
293 me = &dd->msix_info.msix_entries[msix_intr]; in msix_free_irq()
298 hfi1_put_irq_affinity(dd, me); in msix_free_irq()
299 pci_free_irq(dd->pcidev, msix_intr, me->arg); in msix_free_irq()
303 spin_lock(&dd->msix_info.msix_lock); in msix_free_irq()
304 __clear_bit(msix_intr, dd->msix_info.in_use_msix); in msix_free_irq()
305 spin_unlock(&dd->msix_info.msix_lock); in msix_free_irq()
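msix_free_irq() undoes a single request: bounds-check the slot, drop any CPU affinity pinning, release the handler through the PCI core, and clear the in-use bit under the same lock used on allocation. In the sketch below the entry's arg field (used both as the request cookie and as a was-this-slot-requested marker) is assumed from context, since those lines do not reference dd and are not shown above.

/* sketch: release one previously requested MSI-X vector */
static void example_free_vector(struct hfi1_devdata *dd, u8 msix_intr)
{
        struct hfi1_msix_entry *me;

        if (msix_intr >= dd->msix_info.max_requested)
                return;                         /* slot out of range */

        me = &dd->msix_info.msix_entries[msix_intr];
        if (!me->arg)
                return;                         /* slot was never requested */

        hfi1_put_irq_affinity(dd, me);          /* undo CPU pinning */
        pci_free_irq(dd->pcidev, msix_intr, me->arg);
        me->arg = NULL;

        spin_lock(&dd->msix_info.msix_lock);
        __clear_bit(msix_intr, dd->msix_info.in_use_msix);
        spin_unlock(&dd->msix_info.msix_lock);
}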
314 void msix_clean_up_interrupts(struct hfi1_devdata *dd) in msix_clean_up_interrupts() argument
317 struct hfi1_msix_entry *me = dd->msix_info.msix_entries; in msix_clean_up_interrupts()
320 for (i = 0; i < dd->msix_info.max_requested; i++, me++) in msix_clean_up_interrupts()
321 msix_free_irq(dd, i); in msix_clean_up_interrupts()
324 kfree(dd->msix_info.msix_entries); in msix_clean_up_interrupts()
325 dd->msix_info.msix_entries = NULL; in msix_clean_up_interrupts()
326 dd->msix_info.max_requested = 0; in msix_clean_up_interrupts()
328 pci_free_irq_vectors(dd->pcidev); in msix_clean_up_interrupts()
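msix_clean_up_interrupts() is the mirror image of msix_initialize(): free every slot up to max_requested, release the entries array, reset the bookkeeping, and return the vectors to the PCI core. A compact sketch of that teardown order:

/* sketch: tear down all MSI-X state for the device */
static void example_cleanup(struct hfi1_devdata *dd)
{
        int i;

        /* msix_free_irq() skips slots that were never requested */
        for (i = 0; i < dd->msix_info.max_requested; i++)
                msix_free_irq(dd, i);

        kfree(dd->msix_info.msix_entries);
        dd->msix_info.msix_entries = NULL;
        dd->msix_info.max_requested = 0;

        pci_free_irq_vectors(dd->pcidev);
}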
335 void msix_netdev_synchronize_irq(struct hfi1_devdata *dd) in msix_netdev_synchronize_irq() argument
338 int ctxt_count = hfi1_netdev_ctxt_count(dd); in msix_netdev_synchronize_irq()
341 struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i); in msix_netdev_synchronize_irq()
344 me = &dd->msix_info.msix_entries[rcd->msix_intr]; in msix_netdev_synchronize_irq()
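Finally, msix_netdev_synchronize_irq() walks the netdev receive contexts and waits for any in-flight handler on each context's vector to finish. The listing stops at the entry lookup, so the sketch below assumes the wait is done with synchronize_irq() on the Linux IRQ number obtained from the slot; the NULL check on the context is also an assumption.

/* sketch: wait for in-flight netdev receive interrupts to complete */
static void example_netdev_sync(struct hfi1_devdata *dd)
{
        int ctxt_count = hfi1_netdev_ctxt_count(dd);
        int i;

        for (i = 0; i < ctxt_count; i++) {
                struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);

                if (!rcd)
                        continue;
                /* map the stored slot back to a Linux IRQ and wait on it */
                synchronize_irq(pci_irq_vector(dd->pcidev, rcd->msix_intr));
        }
}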