Lines Matching refs:dd

In qib_disarm_piobufs():
    void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
    spin_lock_irqsave(&dd->pioavail_lock, flags);
    __clear_bit(i, dd->pio_need_disarm);
    dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
    spin_unlock_irqrestore(&dd->pioavail_lock, flags);
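
The matches above show the disarm walk: a range of send buffers is disarmed while pioavail_lock is held, and each buffer's deferred-disarm bit is cleared first so the explicit disarm supersedes any pending one. Below is a minimal userspace sketch of the same walk; a pthread mutex stands in for the spinlock, printf for the dd->f_sendctrl() hook, and every name in it is illustrative rather than taken from the driver.

    #include <limits.h>
    #include <pthread.h>
    #include <stdio.h>

    #define NBUFS 128
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Hypothetical stand-ins for dd->pio_need_disarm and dd->pioavail_lock. */
    static unsigned long need_disarm[NBUFS / BITS_PER_LONG];
    static pthread_mutex_t avail_lock = PTHREAD_MUTEX_INITIALIZER;

    static void clear_bit_ul(unsigned n, unsigned long *map)
    {
        map[n / BITS_PER_LONG] &= ~(1UL << (n % BITS_PER_LONG));
    }

    /* Disarm buffers [first, first + cnt), dropping any pending deferred disarm. */
    static void disarm_piobufs(unsigned first, unsigned cnt)
    {
        unsigned i, last = first + cnt;

        pthread_mutex_lock(&avail_lock);
        for (i = first; i < last; i++) {
            clear_bit_ul(i, need_disarm);    /* explicit disarm supersedes deferred one */
            printf("disarm buffer %u\n", i); /* stands in for f_sendctrl(DISARM_BUF(i)) */
        }
        pthread_mutex_unlock(&avail_lock);
    }

    int main(void)
    {
        disarm_piobufs(4, 3);
        return 0;
    }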

In qib_disarm_piobufs_ifneeded():
    struct qib_devdata *dd = rcd->dd;
    spin_lock_irq(&dd->pioavail_lock);
    if (__test_and_clear_bit(i, dd->pio_need_disarm))
        dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
    spin_unlock_irq(&dd->pioavail_lock);

In is_sdma_buf():
    static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
    for (pidx = 0; pidx < dd->num_pports; pidx++) {
        ppd = dd->pport + pidx;

In find_ctxt():
    static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
    spin_lock(&dd->uctxt_lock);
    for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
        rcd = dd->rcd[ctxt];
    spin_unlock(&dd->uctxt_lock);
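
is_sdma_buf() and find_ctxt() both resolve a buffer index back to its owner by linear scan: the former checks each port's SDMA buffer range, the latter walks the user contexts under uctxt_lock. A sketch of that range-scan lookup follows, with an invented context layout; the real qib_ctxtdata fields are not visible in these matches, so the struct below is purely illustrative.

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical per-context record: each context owns a contiguous
     * range of PIO buffers [pio_base, pio_base + piocnt). */
    struct ctxt {
        unsigned pio_base;
        unsigned piocnt;
    };

    static struct ctxt ctxts[] = {
        { .pio_base = 64, .piocnt = 32 },
        { .pio_base = 96, .piocnt = 32 },
    };

    /* Return the context owning buffer bufn, or NULL; mirrors the
     * find_ctxt() scan (minus the uctxt_lock, irrelevant here). */
    static struct ctxt *find_owner(unsigned bufn)
    {
        size_t i;

        for (i = 0; i < sizeof(ctxts) / sizeof(ctxts[0]); i++) {
            struct ctxt *rcd = &ctxts[i];

            if (bufn >= rcd->pio_base && bufn < rcd->pio_base + rcd->piocnt)
                return rcd;
        }
        return NULL;
    }

    int main(void)
    {
        struct ctxt *rcd = find_owner(100);

        printf("buffer 100 %s\n", rcd ? "belongs to a user context" : "is unowned");
        return 0;
    }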

In qib_disarm_piobufs_set():
    void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
    for (i = 0; i < dd->num_pports; i++)
    ppd = is_sdma_buf(dd, i);
    spin_lock_irqsave(&dd->pioavail_lock, flags);
    if (test_bit(i, dd->pio_writing) ||
        (!test_bit(i << 1, dd->pioavailkernel) &&
         find_ctxt(dd, i))) {
        __set_bit(i, dd->pio_need_disarm);
    dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
    spin_unlock_irqrestore(&dd->pioavail_lock, flags);
    for (i = 0; i < dd->num_pports; i++)
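
The interesting branch here is the defer-or-act decision: if a buffer is mid-write (its pio_writing bit is set) or appears to belong to a user context (not marked kernel-available, and find_ctxt() locates an owner), the disarm is recorded in pio_need_disarm for later; otherwise it is issued immediately. qib_sendbuf_done() and qib_disarm_piobufs_ifneeded() honor the deferred bit afterwards. A toy model of both sides of that handshake, with all state and names invented for illustration and locking omitted:

    #include <stdbool.h>
    #include <stdio.h>

    #define NBUFS 16

    static bool writing[NBUFS];      /* stands in for dd->pio_writing */
    static bool need_disarm[NBUFS];  /* stands in for dd->pio_need_disarm */

    static void disarm_now(unsigned i)
    {
        printf("disarm buffer %u\n", i);
    }

    /* Set side: defer if the buffer is busy, act otherwise (the
     * find_ctxt()/pioavailkernel test is folded into 'writing'). */
    static void disarm_or_defer(unsigned i)
    {
        if (writing[i])
            need_disarm[i] = true;   /* deferred: owner finishes the write first */
        else
            disarm_now(i);
    }

    /* Done side: when the write completes, honor a deferred disarm. */
    static void sendbuf_done(unsigned i)
    {
        writing[i] = false;
        if (need_disarm[i]) {
            need_disarm[i] = false;
            disarm_now(i);
        }
    }

    int main(void)
    {
        writing[3] = true;
        disarm_or_defer(3);  /* deferred */
        disarm_or_defer(5);  /* immediate */
        sendbuf_done(3);     /* deferred disarm fires here */
        return 0;
    }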

In update_send_bufs():
    static void update_send_bufs(struct qib_devdata *dd)
    const unsigned piobregs = dd->pioavregs;
    if (!dd->pioavailregs_dma)
    spin_lock_irqsave(&dd->pioavail_lock, flags);
    piov = le64_to_cpu(dd->pioavailregs_dma[i]);
    pchg = dd->pioavailkernel[i] &
            ~(dd->pioavailshadow[i] ^ piov);
    if (pchg && (pchbusy & dd->pioavailshadow[i])) {
        pnew = dd->pioavailshadow[i] & ~pchbusy;
        dd->pioavailshadow[i] = pnew;
    spin_unlock_irqrestore(&dd->pioavail_lock, flags);
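
The reconciliation step is pure bit algebra: ~(shadow ^ piov) keeps the positions where the shadow and the freshly DMA'd copy agree, and ANDing with pioavailkernel restricts that to kernel-owned buffers; the matched lines then clear the corresponding busy bits from the shadow (pnew = shadow & ~pchbusy, where pchbusy is derived from pchg on a line not matched here). A worked example of the pchg expression with made-up 8-bit values; the driver itself operates on 64-bit words.

    #include <stdio.h>

    int main(void)
    {
        /* Made-up 8-bit stand-ins for one word of the three bitmaps. */
        unsigned char kernel = 0xF0; /* buffers owned by the kernel       */
        unsigned char shadow = 0xCC; /* driver's cached availability bits */
        unsigned char dma    = 0xAA; /* chip's DMA'd availability bits    */

        /* ~(shadow ^ dma): positions where cache and chip agree;
         * & kernel: ...restricted to kernel-owned buffers. */
        unsigned char pchg = kernel & (unsigned char)~(shadow ^ dma);

        printf("pchg = 0x%02X\n", pchg); /* 0x90: only bits 4 and 7 qualify */
        return 0;
    }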

In no_send_bufs():
    static noinline void no_send_bufs(struct qib_devdata *dd)
    dd->upd_pio_shadow = 1;

In qib_getsendbuf_range():
    u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
    unsigned long *shadow = dd->pioavailshadow;
    if (!(dd->flags & QIB_PRESENT))
    if (dd->upd_pio_shadow) {
        update_send_bufs(dd);
    spin_lock_irqsave(&dd->pioavail_lock, flags);
    if (dd->last_pio >= first && dd->last_pio <= last)
        i = dd->last_pio + 1;
    nbufs = last - dd->min_kernel_pio + 1;
    i = !first ? dd->min_kernel_pio : first;
    __set_bit(i, dd->pio_writing);
    dd->last_pio = i;
    spin_unlock_irqrestore(&dd->pioavail_lock, flags);
    no_send_bufs(dd);
    if (i < dd->piobcnt2k)
        buf = (u32 __iomem *)(dd->pio2kbase +
                i * dd->palign);
    else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
        buf = (u32 __iomem *)(dd->pio4kbase +
                (i - dd->piobcnt2k) * dd->align4k);
    else
        buf = (u32 __iomem *)(dd->piovl15base +
                (i - (dd->piobcnt2k + dd->piobcnt4k)) *
                dd->align4k);
    dd->upd_pio_shadow = 0;
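
Mapping a buffer index to its MMIO address is a three-tier computation: indices below piobcnt2k live in the 2 KB region at pio2kbase with stride palign, the next piobcnt4k in the 4 KB region at pio4kbase with stride align4k, and any remainder in the VL15 region at piovl15base (the || !dd->piovl15base test folds VL15 buffers into the 4 KB region on chips without a separate base). A plain arithmetic sketch with invented counts, strides, and region offsets, assuming a separate VL15 region:

    #include <stdio.h>
    #include <stdint.h>

    /* Invented geometry; the real values come from chip setup. */
    #define PIOBCNT2K   128u  /* number of 2 KB buffers */
    #define PIOBCNT4K   32u   /* number of 4 KB buffers */
    #define PALIGN      2048u /* stride in the 2 KB region */
    #define ALIGN4K     4096u /* stride in the 4 KB / VL15 regions */

    #define PIO2KBASE   0x100000u /* made-up region offsets */
    #define PIO4KBASE   0x200000u
    #define PIOVL15BASE 0x300000u

    /* Mirror the tiering in qib_getsendbuf_range(): pick the region by
     * index, then scale the index relative to that region's start. */
    static uint32_t buf_addr(unsigned i)
    {
        if (i < PIOBCNT2K)
            return PIO2KBASE + i * PALIGN;
        else if (i < PIOBCNT2K + PIOBCNT4K)
            return PIO4KBASE + (i - PIOBCNT2K) * ALIGN4K;
        else /* VL15 buffers sit past both pools */
            return PIOVL15BASE + (i - (PIOBCNT2K + PIOBCNT4K)) * ALIGN4K;
    }

    int main(void)
    {
        printf("buf 0:   0x%08X\n", (unsigned)buf_addr(0));   /* 2 KB pool   */
        printf("buf 130: 0x%08X\n", (unsigned)buf_addr(130)); /* 4 KB pool   */
        printf("buf 161: 0x%08X\n", (unsigned)buf_addr(161)); /* VL15 region */
        return 0;
    }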

In qib_sendbuf_done():
    void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
    spin_lock_irqsave(&dd->pioavail_lock, flags);
    __clear_bit(n, dd->pio_writing);
    if (__test_and_clear_bit(n, dd->pio_need_disarm))
        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
    spin_unlock_irqrestore(&dd->pioavail_lock, flags);

In qib_chg_pioavailkernel():
    void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
    spin_lock_irqsave(&dd->pioavail_lock, flags);
            dd->pioavailshadow);
            le64_to_cpu(dd->pioavailregs_dma[i]);
            start, dd->pioavailshadow);
            + start, dd->pioavailshadow);
    __set_bit(start, dd->pioavailkernel);
    if ((start >> 1) < dd->min_kernel_pio)
        dd->min_kernel_pio = start >> 1;
            dd->pioavailshadow);
    __clear_bit(start, dd->pioavailkernel);
    if ((start >> 1) > dd->min_kernel_pio)
        dd->min_kernel_pio = start >> 1;
    if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
        dd->last_pio = dd->min_kernel_pio - 1;
    spin_unlock_irqrestore(&dd->pioavail_lock, flags);
    dd->f_txchk_change(dd, ostart, len, avail, rcd);
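
The start >> 1 conversions suggest the shadow and kernel bitmaps carry two bits per buffer, so bit index start maps back to buffer start/2; under that reading, min_kernel_pio tracks the lowest buffer the kernel may allocate, and last_pio is pulled up so the round-robin search in qib_getsendbuf_range() does not resume below the kernel pool. A small bookkeeping sketch under that assumption, echoing only the tail of the function; all values are invented:

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed: two shadow bits per buffer, so buffer n owns bit 2n. */
    static unsigned min_kernel_pio = 64; /* lowest kernel-allocatable buffer    */
    static unsigned last_pio = 80;       /* where the round-robin search resumes */

    /* Flip a range's ownership and keep the watermarks consistent.
     * 'start' is a shadow-bit index, i.e. 2 * buffer number. */
    static void chg_avail(unsigned start, bool to_kernel)
    {
        if (to_kernel) {
            if ((start >> 1) < min_kernel_pio)
                min_kernel_pio = start >> 1;   /* kernel pool grew downward */
        } else {
            if ((start >> 1) > min_kernel_pio)
                min_kernel_pio = start >> 1;   /* kernel pool shrank upward */
        }
        if (min_kernel_pio > 0 && last_pio < min_kernel_pio - 1)
            last_pio = min_kernel_pio - 1;     /* don't resume below the pool */
    }

    int main(void)
    {
        chg_avail(2 * 96, false); /* hand buffers below 96 to user contexts */
        printf("min_kernel_pio=%u last_pio=%u\n", min_kernel_pio, last_pio);
        return 0;
    }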

In qib_cancel_sends():
    struct qib_devdata *dd = ppd->dd;
    for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
        spin_lock_irqsave(&dd->uctxt_lock, flags);
        rcd = dd->rcd[ctxt];
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
        spin_lock_irqsave(&dd->pioavail_lock, flags);
        __set_bit(i, dd->pio_need_disarm);
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
    if (!(dd->flags & QIB_HAS_SEND_DMA))
        dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |

In qib_force_pio_avail_update():
    void qib_force_pio_avail_update(struct qib_devdata *dd)
    dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);

In qib_hol_event():
    if (!(ppd->dd->flags & QIB_INITTED))