 * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
 *    - Redistributions of source code must retain the above
 *    - Redistributions in binary form must reproduce the above
#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
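/*
 * e.g. BMASK(7, 4) == (((1 << 4) - 1) << 4) == 0xf0, i.e. a mask
 * covering bits 7:4 inclusive.
 */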
/* Below is special-purpose, so only really works for the IB SerDes blocks. */

MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
/* Use defines to tie machine-generated names to lower-case names */
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
 * per-port kernel registers. Access only with qib_read_kreg_port()
 * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
 * number of hdrq updates to one per flow (or on errors).
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
/* Most (not all) Counters are per-IBport.
	((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
/* values for vl and port fields in PBC, 7322-specific */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
 * errors and link-recovery errors. They can be reported on
 * DDR when faking DDR negotiations with non-IBTA switches.
 * a non-zero delta.
 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
	char sdmamsgbuf[192]; /* for per-port sdma error messages */
	int port; /* 0 if not port-specific, else port # */
	{ "", qib_7322intr, -1, 0, 0 },
	/* wait for TS1, then go on */
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * Returns -1 on errors (not distinguishable from valid contents at
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
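/*
 * Address math in the read above, in short: the per-context user register
 * region starts at dd->userbase if that mapping exists, otherwise at
 * dd->kregbase + dd->uregbase; each context's registers then sit
 * dd->ureg_align * ctxt bytes into that region, indexed by regno.
 */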
 * qib_write_ureg - write virtualized per-context register
	if (dd->userbase)
		((char __iomem *) dd->userbase +
		 dd->ureg_align * ctxt);
		(dd->uregbase +
		 (char __iomem *) dd->kregbase +
		 dd->ureg_align * ctxt);
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readl((u32 __iomem *) &dd->kregbase[regno]);
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readq(&dd->kregbase[regno]);
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->kregbase[regno]);
 * not many sanity checks for the port-specific kernel register routines,
	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
	return readq(&ppd->cpspec->kpregbase[regno]);
	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->kpregbase[regno]);
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
	return readq(&dd->cspec->cregbase[regno]);
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
	return readl(&dd->cspec->cregbase[regno]);
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->cpregbase[regno]);
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
	return readq(&ppd->cpspec->cpregbase[regno]);
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
	return readl(&ppd->cpspec->cpregbase[regno]);
 * Per chip (rather than per-port) errors. Most either do
 * nothing but trigger a print (because they self-recover, or
 * E_AUTO mechanism. This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * per-packet errors.
/* Error bits that are packet-related (Receive, per-port) */
 * Error bits that are Send-related (per port)
 * Below generates "auto-message" for interrupts not specific to any port or
/* Below generates "auto-message" for interrupts specific to a port */
 * Below generates "auto-message" for interrupts specific to a context,
 * with ctxt-number appended
 * PIO buffer, and may need to cancel that buffer, so it can be re-used,
	struct qib_devdata *dd = ppd->dd;
	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
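	/*
	 * i.e. regcnt = DIV_ROUND_UP(piobcnt, BITS_PER_LONG): one shadow
	 * word per BITS_PER_LONG send buffers, rounded up.
	 */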
	while (errs && msp && msp->mask) {
		multi = (msp->mask & (msp->mask - 1));
		while (errs & msp->mask) {
			these = (errs & msp->mask);
			lmask = (these & (these - 1)) ^ these;
				len--;
				/* msp->sz counts the nul */
				took = min_t(size_t, msp->sz - (size_t)1, len);
				memcpy(msg, msp->msg, took);
				len -= took;
				int idx = -1;
				while (lmask & msp->mask) {
					len -= took;
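/*
 * The bit trick above: for any nonzero x, x & (x - 1) clears the lowest
 * set bit, so (x & (x - 1)) ^ x isolates it. With these == 0b0110,
 * these & (these - 1) == 0b0100, and the XOR leaves lmask == 0b0010.
 * The same identity makes "multi" nonzero exactly when msp->mask has
 * more than one bit set.
 */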
	struct qib_devdata *dd = ppd->dd;
		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
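	/*
	 * Note the write-combining path deliberately streams all but the
	 * last header word before separately storing the final word: the
	 * usual idiom for ensuring the chip sees a complete packet only
	 * after the rest of the buffer has been flushed.
	 */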
	struct qib_devdata *dd = ppd->dd;
	spin_lock(&dd->sendctrl_lock);
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
	ppd->p_sendctrl |= set_sendctrl;
	ppd->p_sendctrl &= ~clr_sendctrl;
			    ppd->p_sendctrl |
	qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
	spin_unlock(&dd->sendctrl_lock);
	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
			    ppd->sdma_descq_cnt |
	/* Commit writes to memory and advance the tail on the chip */
	ppd->sdma_descq_tail = tail;
	ppd->sdma_head_dma[0] = 0;
		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
	struct qib_devdata *dd = ppd->dd;
	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
			    ppd->port);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
		qib_dev_porterr(dd, ppd->port,
			qib_sdma_state_names[ppd->sdma_state.current_state],
			errs, ppd->cpspec->sdmamsgbuf);
	switch (ppd->sdma_state.current_state) {
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
 * handle per-device errors (not per-port errors)
	qib_devinfo(dd->pcidev,
	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;
		qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
		qib_disarm_7322_senderrbufs(dd->pport);
		qib_disarm_7322_senderrbufs(dd->pport);
	err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
			"Got reset, requires re-init (unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED; /* needs re-init */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
	 * If there were hdrq or egrfull errors, wake up any processes
	 * to support it, it's better to just wake everybody up if we
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	struct qib_pportdata *ppd = cp->ppd;
	ppd->cpspec->chase_timer.expires = 0;
	ppd->cpspec->chase_end = 0;
	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
	add_timer(&ppd->cpspec->chase_timer);
	 * get stuck if we are unlucky on timing on both sides of
	 * then re-enable.
	if (ppd->cpspec->chase_end &&
	    time_after(tnow, ppd->cpspec->chase_end))
	else if (!ppd->cpspec->chase_end)
		ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
		ppd->cpspec->chase_end = 0;
		ppd->cpspec->qdr_reforce = 1;
		if (!ppd->dd->cspec->r1)
	} else if (ppd->cpspec->qdr_reforce &&
	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
	    ppd->link_speed_enabled == QIB_IB_QDR &&
		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
		if (!ppd->dd->cspec->r1 &&
		 * turn LOS back on */
		if (!ppd->cpspec->qdr_dfe_on &&
			ppd->cpspec->qdr_dfe_on = 1;
			ppd->cpspec->qdr_dfe_time = 0;
			/* On link down, reenable QDR adaptation */
				ppd->dd->cspec->r1 ?
				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
				ppd->dd->unit, ppd->port, ibclt);
 * This is per-pport error handling.
	struct qib_devdata *dd = ppd->dd;
		qib_devinfo(dd->pcidev,
			    ppd->port);
	msg = ppd->cpspec->epmsgbuf;
		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
			snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
		qib_dev_porterr(dd, ppd->port,
		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			if (!(ppd->cpspec->ibcctrl_a &
				 * other "chatter" from link-negotiation (pre Init)
				ppd->cpspec->ibcctrl_a |=
					ppd->cpspec->ibcctrl_a);
			ppd->link_width_active =
			ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
	if (ppd->state_wanted & ppd->lflags)
		wake_up_interruptible(&ppd->state_wait);
	if (dd->flags & QIB_BADINTR)
	qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
	/* cause any pending enabled interrupts to be re-delivered */
	if (dd->cspec->num_msix_entries) {
 * Forcibly update the in-memory pioavail register copies after cleanup
 * This is in chip-specific code because of all of the register accesses,
 * even though the details are similar on most chips.
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
	qib_write_kreg(dd, kr_control, dd->control);
	 * and cancelling sends. Re-enable error interrupts before possible
	 * force of re-interrupt on pending interrupts.
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	/* We need to purge per-port errs and reset mask, too */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (!dd->pport[pidx].link_speed_supported)
		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
 * qib_7322_handle_hwerrors - display hardware errors.
	hwerrs &= dd->cspec->hwerrmask;
		qib_devinfo(dd->pcidev,
	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
		    dd->cspec->stay_in_freeze) {
			if (dd->flags & QIB_INITTED)
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
		struct qib_pportdata *ppd = dd->pport;
		for (; pidx < dd->num_pports; ++pidx, ppd++) {
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	if (isfatal && !dd->diag_client) {
			dd->serial);
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
 * qib_7322_init_hwerrors - enable hardware errors
	/* never clear BIST failure, so reported on each driver load */
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].link_speed_supported)
			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
 * on chips that are count-based, rather than trigger-based. There is no
 * Only chip-specific because it's all register accesses
		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
	struct qib_devdata *dd = ppd->dd;
	 * If we are told to disable, note that so link-recovery
	 * completely clean when re-enabled (before we
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		 * link-recovery code attempt to bring us back up.
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		ppd->cpspec->ibcctrl_a &=
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
	/* write to chip to prevent back-to-back writes of ibc reg */
#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
	struct qib_devdata *dd = ppd->dd;
	numvls = qib_num_vls(ppd->vls_operational);
	 * Set up per-VL credits. Below is a kluge based on these assumptions:
	 * 2) give VL15 17 credits, for two max-plausible packets.
	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
	totcred -= cred_vl;
	vl0extra = totcred - cred_vl * numvls;
	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
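/*
 * The credit arithmetic, spelled out: two 288-byte VL15 packets need
 * ceil(2 * 288 / RCV_BUF_UNITSZ) buffer units, which is what the
 * round-up expression computes. The remaining totcred - cred_vl units
 * are split evenly across the numvls data VLs, and the division
 * remainder (vl0extra) is credited to VL0.
 */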
 * qib_7322_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
	struct qib_devdata *dd = ppd->dd;
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	ppd->cpspec->ibdeltainprog = 1;
	ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
	ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
	if (!ppd->cpspec->ibcctrl_b) {
		unsigned lse = ppd->link_speed_enabled;
		 * Not on re-init after reset, establish shadow
		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
		if (lse & (lse - 1)) /* Multiple speeds enabled */
			ppd->cpspec->ibcctrl_b |=
			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
			ppd->cpspec->ibcctrl_b |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
	/* always enable these on driver reload, not sticky */
	ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	guid = be64_to_cpu(ppd->guid);
		if (dd->base_guid)
			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
		ppd->guid = cpu_to_be64(guid);
	/* write to chip to prevent back-to-back writes of ibc reg */
	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
	/* be paranoid against later code motion, etc. */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
 * qib_7322_mini_quiet_serdes - set serdes to txidle
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
	if (ppd->dd->cspec->r1)
		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.function) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
		struct qib_devdata *dd = ppd->dd;
		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->ibsymsnap;
			val -= ppd->cpspec->ibsymdelta;
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
		if (ppd->cpspec->iblnkdowndelta) {
			val += ppd->cpspec->iblnkdowndelta;
	 * are cleared on driver reload.
 * qib_setup_7322_setextled - set the state of the two external LEDs
 * @ppd: physical port on the qlogic_ib device
 * @on: whether the link is up or not
 * The exact combination of LEDs lit when @on is true is determined by looking
 * require waking up every 10-20 msecs and checking the counters
 * on the chip, and then turning the LED off if appropriate. That's
static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
	struct qib_devdata *dd = ppd->dd;
	if (dd->diag_client)
	if (ppd->led_override) {
		grn = (ppd->led_override & QIB_LED_PHYS);
		yel = (ppd->led_override & QIB_LED_LOG);
	} else if (on) {
	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
		 * This is roughly 1/15 sec (66.6 ms) on,
		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
	dd->cspec->extctrl = extctl;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	if (ledblink) /* blink the LED on packet receive */
	if (dd->flags & QIB_DCA_ENABLED)
		if (!dca_add_requester(&dd->pcidev->dev)) {
			qib_devinfo(dd->pcidev, "DCA enabled\n");
			dd->flags |= QIB_DCA_ENABLED;
		if (dd->flags & QIB_DCA_ENABLED) {
			dca_remove_requester(&dd->pcidev->dev);
			dd->flags &= ~QIB_DCA_ENABLED;
			dd->cspec->dca_ctrl = 0;
				       dd->cspec->dca_ctrl);
	struct qib_devdata *dd = rcd->dd;
	struct qib_chip_specific *cspec = dd->cspec;
	if (!(dd->flags & QIB_DCA_ENABLED))
	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
		cspec->rhdr_cpu[rcd->ctxt] = cpu;
		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
		qib_devinfo(dd->pcidev,
			    "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
			    (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		qib_write_kreg(dd, rmp->regno,
			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	struct qib_devdata *dd = ppd->dd;
	struct qib_chip_specific *cspec = dd->cspec;
	unsigned pidx = ppd->port - 1;
	if (!(dd->flags & QIB_DCA_ENABLED))
	if (cspec->sdma_cpu[pidx] != cpu) {
		cspec->sdma_cpu[pidx] = cpu;
		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
		cspec->dca_rcvhdr_ctrl[4] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
			(ppd->hw_pidx ?
		qib_devinfo(dd->pcidev,
			    "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
			    (long long) cspec->dca_rcvhdr_ctrl[4]);
			       cspec->dca_rcvhdr_ctrl[4]);
		cspec->dca_ctrl |= ppd->hw_pidx ?
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	struct qib_chip_specific *cspec = dd->cspec;
	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
		cspec->rhdr_cpu[i] = -1;
	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
		cspec->sdma_cpu[i] = -1;
	cspec->dca_rcvhdr_ctrl[0] =
	cspec->dca_rcvhdr_ctrl[1] =
	cspec->dca_rcvhdr_ctrl[2] =
	cspec->dca_rcvhdr_ctrl[3] =
	cspec->dca_rcvhdr_ctrl[4] =
	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
			       cspec->dca_rcvhdr_ctrl[i]);
	for (i = 0; i < cspec->num_msix_entries; i++)
	if (n->rcv) {
		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
	if (n->rcv) {
		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
		dd = rcd->dd;
		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
		dd = ppd->dd;
	qib_devinfo(dd->pcidev,
		    "release on HCA notify 0x%p n 0x%p\n", ref, n);
	dd->cspec->main_int_mask = ~0ULL;
	for (i = 0; i < dd->cspec->num_msix_entries; i++) {
		if (dd->cspec->msix_entries[i].arg) {
			irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
			free_cpumask_var(dd->cspec->msix_entries[i].mask);
			pci_free_irq(dd->pcidev, i,
				     dd->cspec->msix_entries[i].arg);
	if (!dd->cspec->num_msix_entries)
		pci_free_irq(dd->pcidev, 0, dd);
	dd->cspec->num_msix_entries = 0;
	pci_free_irq_vectors(dd->pcidev);
	if (dd->flags & QIB_DCA_ENABLED) {
		dca_remove_requester(&dd->pcidev->dev);
		dd->flags &= ~QIB_DCA_ENABLED;
		dd->cspec->dca_ctrl = 0;
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
	kfree(dd->cspec->cntrs);
	bitmap_free(dd->cspec->sendchkenable);
	bitmap_free(dd->cspec->sendgrhchk);
	bitmap_free(dd->cspec->sendibchk);
	kfree(dd->cspec->msix_entries);
	for (i = 0; i < dd->num_pports; i++) {
		kfree(dd->pport[i].cpspec->portcntrs);
		if (dd->flags & QIB_HAS_QSFP) {
			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
			dd->cspec->gpio_mask &= ~mask;
			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	struct qib_pportdata *ppd0 = &dd->pport[0];
	struct qib_pportdata *ppd1 = &dd->pport[1];
	spin_lock_irqsave(&dd->sendctrl_lock, flags);
		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
 * keep mainline interrupt handler cache-friendly
	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
/* keep mainline interrupt handler cache-friendly */
	 * have a bad side-effect on some diagnostic that wanted
	 * to poll for a status-change, but the various shadows
	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
		if (!dd->pport[pidx].link_speed_supported)
		ppd = dd->pport + pidx;
		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
		if (gpiostatus & dd->cspec->gpio_mask & mask) {
			qd = &ppd->cpspec->qsfp_data;
			qd->t_insert = jiffies;
			queue_work(ib_wq, &qd->work);
		dd->cspec->gpio_mask &= ~gpio_irq;
		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
		tasklet_schedule(&dd->error_tasklet);
	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
		handle_7322_p_errors(dd->rcd[0]->ppd);
	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
		handle_7322_p_errors(dd->rcd[1]->ppd);
 * Dynamically adjust the rcv int timeout for a context based on incoming
	struct qib_devdata *dd = rcd->dd;
	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
	 * Dynamically adjust idle timeout on chip
	 * based on number of packets processed.
	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
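/*
 * A minimal sketch of such an adaptive heuristic (illustrative names and
 * thresholds, not necessarily the elided driver code): shrink the timeout
 * when an interrupt found little work, grow it toward a cap when it found
 * a lot, e.g.:
 *
 *	if (npkts < low_watermark && timeout > min_timeout)
 *		timeout >>= 1;
 *	else if (npkts >= high_watermark && timeout < max_timeout)
 *		timeout <<= 1;
 */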
	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
	istat &= dd->cspec->main_int_mask;
	this_cpu_inc(*dd->int_counter);
	 * the queue, and will re-interrupt if necessary. The processor
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (dd->rcd[i])
				qib_kreceive(dd->rcd[i], NULL, &npkts);
	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
	struct qib_devdata *dd = rcd->dd;
	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
	this_cpu_inc(*dd->int_counter);
		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
	this_cpu_inc(*dd->int_counter);
	if (dd->flags & QIB_INITTED)
	struct qib_devdata *dd = ppd->dd;
	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
	this_cpu_inc(*dd->int_counter);
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
	struct qib_devdata *dd = ppd->dd;
	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
	this_cpu_inc(*dd->int_counter);
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
	struct qib_devdata *dd = ppd->dd;
	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
	this_cpu_inc(*dd->int_counter);
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
	struct qib_devdata *dd = ppd->dd;
	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
	this_cpu_inc(*dd->int_counter);
	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
	if (!dd->cspec->msix_entries[msixnum].dca)
	qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
		    dd->unit, pci_irq_vector(dd->pcidev, msixnum));
	irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
	dd->cspec->msix_entries[msixnum].notifier = NULL;
	struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
	if (!m->dca)
	m->notifier = n;
	n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
	n->notify.notify = qib_irq_notifier_notify;
	n->notify.release = qib_irq_notifier_release;
	n->arg = m->arg;
	n->rcv = m->rcv;
	qib_devinfo(dd->pcidev,
		    n->notify.irq, n->rcv, &n->notify);
		n->notify.irq,
		&n->notify);
		m->notifier = NULL;
 * Set up our chip-specific interrupt handler.
	if (!dd->num_pports)
	if (!dd->cspec->num_msix_entries) {
		ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
				pci_irq_vector(dd->pcidev, 0), ret);
		dd->cspec->main_int_mask = ~0ULL;
	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
			/* skip if for a non-configured port */
			if (irq_table[i].port > dd->num_pports)
			arg = dd->pport + irq_table[i].port - 1;
			ret = pci_request_irq(dd->pcidev, msixnum, handler,
					      dd->unit,
			ctxt = i - ARRAY_SIZE(irq_table);
			arg = dd->rcd[ctxt];
			ret = pci_request_irq(dd->pcidev, msixnum, handler,
					      dd->unit);
				pci_irq_vector(dd->pcidev, msixnum),
			pci_alloc_irq_vectors(dd->pcidev, 1, 1,
		dd->cspec->msix_entries[msixnum].arg = arg;
		dd->cspec->msix_entries[msixnum].dca = dca;
		dd->cspec->msix_entries[msixnum].rcv =
				&dd->cspec->msix_entries[msixnum].mask,
				dd->cspec->msix_entries[msixnum].mask);
				dd->cspec->msix_entries[msixnum].mask);
				pci_irq_vector(dd->pcidev, msixnum),
				dd->cspec->msix_entries[msixnum].mask);
	dd->cspec->main_int_mask = mask;
	tasklet_setup(&dd->error_tasklet, qib_error_tasklet);
 * qib_7322_boardname - fill in the board name and note features
 * info will be based on the board revision register
	/* Will need enumeration of board-types here */
	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
		dd->boardname = "InfiniPath_QLE7342_Emulation";
		dd->boardname = "InfiniPath_QLE7340";
		dd->flags |= QIB_HAS_QSFP;
		dd->boardname = "InfiniPath_QLE7342";
		dd->flags |= QIB_HAS_QSFP;
		dd->boardname = "InfiniPath_QMI7342";
		dd->boardname = "InfiniPath_Unsupported7342";
		dd->boardname = "InfiniPath_QMH7342";
		dd->boardname = "InfiniPath_QME7342";
		dd->boardname = "InfiniPath_QME7362";
		dd->flags |= QIB_HAS_QSFP;
		dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
		dd->flags |= QIB_HAS_QSFP;
		dd->boardname = "InfiniPath_QLE7342_TEST";
		dd->flags |= QIB_HAS_QSFP;
		dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
	dd->board_atten = 1; /* index into txdds_Xdr */
	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
		 dd->majrev, dd->minrev,
		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
		qib_devinfo(dd->pcidev,
			    dd->unit);
	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
	msix_entries = dd->cspec->num_msix_entries;
	/* no interrupts till re-initted */
	msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
	dd->pport->cpspec->ibdeltainprog = 0;
	dd->pport->cpspec->ibsymdelta = 0;
	dd->pport->cpspec->iblnkerrdelta = 0;
	dd->pport->cpspec->ibmalfdelta = 0;
	dd->z_int_counter = qib_int_counter(dd);
	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
	dd->flags |= QIB_DOING_RESET;
	val = dd->control | QLOGIC_IB_C_RESET;
	writeq(val, &dd->kregbase[kr_control]);
		 * Allow MBIST, etc. to complete; longer on each retry.
		val = readq(&dd->kregbase[kr_revision]);
		if (val == dd->revision)
	dd->flags |= QIB_PRESENT; /* it's back */
	for (i = 0; i < dd->num_pports; ++i)
		write_7322_init_portregs(&dd->pport[i]);
	if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
	dd->cspec->num_msix_entries = msix_entries;
	for (i = 0; i < dd->num_pports; ++i) {
		struct qib_pportdata *ppd = &dd->pport[i];
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
 * qib_7322_put_tid - write a TID to the chip
	if (!(dd->flags & QIB_PRESENT))
	if (pa != dd->tidinvalid) {
		chippa |= dd->tidtemplate;
 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
	if (!dd->kregbase || !rcd)
	ctxt = rcd->ctxt;
	tidinv = dd->tidinvalid;
		((char __iomem *) dd->kregbase +
		 dd->rcvtidbase +
		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
	for (i = 0; i < dd->rcvtidcnt; i++)
		((char __iomem *) dd->kregbase +
		 dd->rcvegrbase +
		 rcd->rcvegr_tid_base * sizeof(*tidbase));
	for (i = 0; i < rcd->rcvegrcnt; i++)
 * qib_7322_tidtemplate - setup constants for TID updates
	if (dd->rcvegrbufsize == 2048)
		dd->tidtemplate = IBA7322_TID_SZ_2K;
	else if (dd->rcvegrbufsize == 4096)
		dd->tidtemplate = IBA7322_TID_SZ_4K;
	dd->tidinvalid = 0;
 * qib_7322_get_base_info - set chip-specific flags for user code
 * We set the PCIE flag because the lower bandwidth on PCIe vs
	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
	if (rcd->dd->cspec->r1)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
		(rhf_addr - dd->rhf_offset + offset);
	dd->cspec->numctxts = nchipctxts;
	if (qib_n_krcv_queues > 1 && dd->num_pports) {
		dd->first_user_ctxt = NUM_IB_PORTS +
			(qib_n_krcv_queues - 1) * dd->num_pports;
		if (dd->first_user_ctxt > nchipctxts)
			dd->first_user_ctxt = nchipctxts;
		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
		dd->first_user_ctxt = NUM_IB_PORTS;
		dd->n_krcv_queues = 1;
		int nctxts = dd->first_user_ctxt + num_online_cpus();
			dd->ctxtcnt = 6;
			dd->ctxtcnt = 10;
			dd->ctxtcnt = nchipctxts;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->ctxtcnt = dd->num_pports;
		dd->ctxtcnt = qib_cfgctxts;
	if (!dd->ctxtcnt) /* none of the above, set to max */
		dd->ctxtcnt = nchipctxts;
	 * Lock to be paranoid about later motion, etc.
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	if (dd->ctxtcnt > 10)
		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
	else if (dd->ctxtcnt > 6)
		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
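/*
 * ContextCfg thus encodes the tier chosen above: the largest
 * configuration writes 2, the 10-context tier writes 1, and the
 * 6-context tier leaves the field at 0.
 */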
	/* kr_rcvegrcnt changes based on the number of contexts enabled */
	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
					dd->num_pports > 1 ? 1024U : 2048U);
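/*
 * e.g. on a dual-port board reporting an rcvegrcnt of, say, 2048, the
 * second form sizes the header queue at 2 * max(2048, 1024) == 4096
 * entries.
 */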
	u64 maskr; /* right-justified mask */
	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
		ret = ppd->link_width_enabled;
	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
		ret = ppd->link_width_active;
		ret = ppd->link_speed_enabled;
		ret = ppd->link_speed_active;
	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
		ret = ppd->vls_operational;
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
		ret = (ppd->cpspec->ibcctrl_a &
		if (ppd->link_speed_active == QIB_IB_QDR)
		else if (ppd->link_speed_active == QIB_IB_DDR)
		ret = -EINVAL;
		ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
 * heavily on it.
	struct qib_devdata *dd = ppd->dd;
	u64 maskr; /* right-justified mask */
		 * For header-checking, the SLID in the packet will
		 * false-positives.
	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val;
		 * link is currently down, otherwise takes effect on next
		ppd->link_speed_enabled = val;
		if (val & (val - 1)) {
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
		ppd->cpspec->ibcctrl_a &=
		ppd->cpspec->ibcctrl_a |= (u64) val <<
				    ppd->cpspec->ibcctrl_a);
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
		ppd->cpspec->ibcctrl_a &=
		ppd->cpspec->ibcctrl_a |= (u64) val <<
				    ppd->cpspec->ibcctrl_a);
		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
			((u64) ppd->pkeys[2] << 32) |
			((u64) ppd->pkeys[3] << 48);
			ppd->cpspec->ibcctrl_a &=
			ppd->cpspec->ibcctrl_a |=
		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
		 * on changes.
		val = (ppd->ibmaxlen >> 2) + 1;
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
		ppd->cpspec->ibcctrl_a |= (u64)val <<
				    ppd->cpspec->ibcctrl_a);
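/*
 * MaxPktLen is programmed in 32-bit words, so the byte count ibmaxlen
 * is shifted right by two; e.g. an ibmaxlen of 4096 bytes yields
 * 1024 + 1 = 1025, the +1 visible above adding one word on top of the
 * nominal maximum.
 */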
		ppd->cpspec->ibmalfusesnap = 1;
		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
		if (!ppd->cpspec->ibdeltainprog &&
			ppd->cpspec->ibdeltainprog = 1;
			ppd->cpspec->ibsymsnap =
			ppd->cpspec->iblnkerrsnap =
		if (ppd->cpspec->ibmalfusesnap) {
			ppd->cpspec->ibmalfusesnap = 0;
			ppd->cpspec->ibmalfdelta +=
					crp_errlink) -
				ppd->cpspec->ibmalfsnap;
		ret = -EINVAL;
			ppd->cpspec->chase_end = 0;
			if (ppd->cpspec->chase_timer.expires) {
				del_timer_sync(&ppd->cpspec->chase_timer);
				ppd->cpspec->chase_timer.expires = 0;
		ret = -EINVAL;
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
		ret = -EINVAL;
		if (ppd->dd->cspec->r1) {
			cancel_delayed_work(&ppd->cpspec->ipg_work);
			ppd->cpspec->ipg_tries = 0;
		ret = -EINVAL;
	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
			    ppd->dd->unit, ppd->port);
		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
		qib_devinfo(ppd->dd->pcidev,
			    ppd->dd->unit, ppd->port);
		ret = -EINVAL;
			    ppd->cpspec->ibcctrl_a);
	ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
	ppd->cpspec->ibcctrl_b = ctrlb | val;
			    ppd->cpspec->ibcctrl_b);
	qib_write_kreg(ppd->dd, kr_scratch, 0);
	vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
	vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
		struct qib_devdata *dd = ppd->dd;
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
		return -EINVAL;
		return -EINVAL;
	 * that the timer is enabled on reception of a packet.
	qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
	if (rcd->rcvhdrtail_kvaddr)
		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
 * Modify the RCVCTRL register in a chip-specific way. This
 * location is chip-specific, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
4460 struct qib_devdata *dd = ppd->dd; in rcvctrl_7322_mod()
4465 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); in rcvctrl_7322_mod()
4468 dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable); in rcvctrl_7322_mod()
4470 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable); in rcvctrl_7322_mod()
4472 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); in rcvctrl_7322_mod()
4474 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd); in rcvctrl_7322_mod()
4476 ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable); in rcvctrl_7322_mod()
4478 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable); in rcvctrl_7322_mod()
4480 mask = (1ULL << dd->ctxtcnt) - 1; in rcvctrl_7322_mod()
4484 rcd = dd->rcd[ctxt]; in rcvctrl_7322_mod()
4487 ppd->p_rcvctrl |= in rcvctrl_7322_mod()
4489 if (!(dd->flags & QIB_NODMA_RTAIL)) { in rcvctrl_7322_mod()
4491 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); in rcvctrl_7322_mod()
4495 rcd->rcvhdrqtailaddr_phys); in rcvctrl_7322_mod()
4497 rcd->rcvhdrq_phys); in rcvctrl_7322_mod()
4498 rcd->seq_cnt = 1; in rcvctrl_7322_mod()
4501 ppd->p_rcvctrl &= in rcvctrl_7322_mod()
4504 dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull); in rcvctrl_7322_mod()
4506 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull)); in rcvctrl_7322_mod()
4508 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail)); in rcvctrl_7322_mod()
4510 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail)); in rcvctrl_7322_mod()
4512 * Decide which registers to write depending on the ops enabled. in rcvctrl_7322_mod()
4517 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); in rcvctrl_7322_mod()
4519 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); in rcvctrl_7322_mod()
4520 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) { in rcvctrl_7322_mod()
4533 dd->rcd[ctxt]->head = val; in rcvctrl_7322_mod()
4534 /* If kctxt, interrupt on next receive. */ in rcvctrl_7322_mod()
4535 if (ctxt < dd->first_user_ctxt) in rcvctrl_7322_mod()
4536 val |= dd->rhdrhead_intr_off; in rcvctrl_7322_mod()
4539 dd->rcd[ctxt] && dd->rhdrhead_intr_off) { in rcvctrl_7322_mod()
4541 val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off; in rcvctrl_7322_mod()
4557 for (i = 0; i < dd->cfgctxts; i++) { in rcvctrl_7322_mod()
4567 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); in rcvctrl_7322_mod()
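/*
 * Usage sketch (editorial): because <op> is a bit-mask, a caller can
 * fold several of the operations above into one locked update; for
 * example, enabling a context together with tail updates and its
 * rcvavail interrupt (flag names as defined in qib.h):
 *
 *      dd->f_rcvctrl(ppd, QIB_RCVCTRL_CTXT_ENB |
 *              QIB_RCVCTRL_TAILUPD_ENB |
 *              QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
 */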
4571  * Modify the SENDCTRL register in a chip-specific way. This
4574 * The chip doesn't allow back-to-back sendctrl writes, so write
4577 * Which register is written depends on the operation.
4578 * Most operate on the common register, while
4579 * SEND_ENB and SEND_DIS operate on the per-port ones.
4599 struct qib_devdata *dd = ppd->dd; in sendctrl_7322_mod()
4603 spin_lock_irqsave(&dd->sendctrl_lock, flags); in sendctrl_7322_mod()
4607 dd->sendctrl = 0; in sendctrl_7322_mod()
4609 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); in sendctrl_7322_mod()
4611 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); in sendctrl_7322_mod()
4612 if (dd->flags & QIB_USE_SPCL_TRIG) in sendctrl_7322_mod()
4613 dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn); in sendctrl_7322_mod()
4618 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable); in sendctrl_7322_mod()
4620 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable); in sendctrl_7322_mod()
4625 tmp_dd_sendctrl = dd->sendctrl; in sendctrl_7322_mod()
4626 last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; in sendctrl_7322_mod()
4641 u64 tmp_ppd_sendctrl = ppd->p_sendctrl; in sendctrl_7322_mod()
4655 tmp_dd_sendctrl = dd->sendctrl; in sendctrl_7322_mod()
4662 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) in sendctrl_7322_mod()
4671 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); in sendctrl_7322_mod()
4676 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); in sendctrl_7322_mod()
4680 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in sendctrl_7322_mod()
4687 * to occur, so in-memory copy is in sync with in sendctrl_7322_mod()
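/*
 * Minimal sketch (editorial, hypothetical helper name) of the
 * write-flush idiom this function depends on: the chip rejects
 * back-to-back sendctrl writes, so each sendctrl write is chased
 * with a scratch-register write that forces it to complete.
 */
static inline void sendctrl_flushed_write_example(struct qib_devdata *dd,
                                                  u64 newval)
{
        qib_write_kreg(dd, kr_sendctrl, newval);        /* post the change */
        qib_write_kreg(dd, kr_scratch, 0ULL);           /* force it out */
}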
4703 * qib_portcntr_7322 - read a per-port chip counter
4709 struct qib_devdata *dd = ppd->dd; in qib_portcntr_7322()
4752 /* pseudo-counter, summed for all ports */ in qib_portcntr_7322()
4757 qib_devinfo(ppd->dd->pcidev, in qib_portcntr_7322()
4763 /* handle non-counters and special cases first */ in qib_portcntr_7322()
4768 for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) { in qib_portcntr_7322()
4769 struct qib_ctxtdata *rcd = dd->rcd[i]; in qib_portcntr_7322()
4771 if (!rcd || rcd->ppd != ppd) in qib_portcntr_7322()
4785 /* were counters in older chips, now per-port kernel regs */ in qib_portcntr_7322()
4792 * avoid two independent reads when on Opteron. in qib_portcntr_7322()
4799 if (ppd->cpspec->ibdeltainprog) in qib_portcntr_7322()
4800 ret -= ret - ppd->cpspec->ibsymsnap; in qib_portcntr_7322()
4801 ret -= ppd->cpspec->ibsymdelta; in qib_portcntr_7322()
4803 if (ppd->cpspec->ibdeltainprog) in qib_portcntr_7322()
4804 ret -= ret - ppd->cpspec->iblnkerrsnap; in qib_portcntr_7322()
4805 ret -= ppd->cpspec->iblnkerrdelta; in qib_portcntr_7322()
4807 ret -= ppd->cpspec->ibmalfdelta; in qib_portcntr_7322()
4809 ret += ppd->cpspec->iblnkdowndelta; in qib_portcntr_7322()
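/*
 * Note (editorial): the snapshot/delta arithmetic above keeps link
 * errors that the driver itself provokes (e.g. while bouncing the
 * link during autonegotiation) out of the reported totals.  While
 * ibdeltainprog is set, "ret -= ret - snap" clamps the counter to
 * its snapshot value; afterwards the accumulated delta is subtracted
 * permanently.
 */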
4815 * Device counter names (not port-specific), one line per stat,
4820 * Non-error counters are first.
4821  * Start of "error" counters is indicated by a leading "E " on the first
4880 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4892 "TxDmaDesc\n" /* 7220 and 7322-only */
4893 "E RxDlidFltr\n" /* 7220 and 7322-only */
4916 "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4920 "RxQPBadCtxt\n" /* 7322-only from here down */
4970 for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts; in init_7322_cntrnames()
4979 dd->cspec->ncntrs = i; in init_7322_cntrnames()
4982 dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1; in init_7322_cntrnames()
4984 dd->cspec->cntrnamelen = 1 + s - cntr7322names; in init_7322_cntrnames()
4985 dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64), in init_7322_cntrnames()
4990 dd->cspec->nportcntrs = i - 1; in init_7322_cntrnames()
4991 dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1; in init_7322_cntrnames()
4992 for (i = 0; i < dd->num_pports; ++i) { in init_7322_cntrnames()
4993 dd->pport[i].cpspec->portcntrs = in init_7322_cntrnames()
4994 kmalloc_array(dd->cspec->nportcntrs, sizeof(u64), in init_7322_cntrnames()
5005 ret = dd->cspec->cntrnamelen; in qib_read_7322cntrs()
5011 u64 *cntr = dd->cspec->cntrs; in qib_read_7322cntrs()
5014 ret = dd->cspec->ncntrs * sizeof(u64); in qib_read_7322cntrs()
5021 for (i = 0; i < dd->cspec->ncntrs; i++) in qib_read_7322cntrs()
5040 ret = dd->cspec->portcntrnamelen; in qib_read_7322portcntrs()
5046 struct qib_pportdata *ppd = &dd->pport[port]; in qib_read_7322portcntrs()
5047 u64 *cntr = ppd->cpspec->portcntrs; in qib_read_7322portcntrs()
5050 ret = dd->cspec->nportcntrs * sizeof(u64); in qib_read_7322portcntrs()
5057 for (i = 0; i < dd->cspec->nportcntrs; i++) { in qib_read_7322portcntrs()
5076 * qib_get_7322_faststats - get word counters from chip before they overflow
5082  * which we don't have yet for 7322-based boards.
5094 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_get_7322_faststats()
5095 ppd = dd->pport + pidx; in qib_get_7322_faststats()
5102 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED) in qib_get_7322_faststats()
5103 || dd->diag_client) in qib_get_7322_faststats()
5107 * Maintain an activity timer, based on traffic in qib_get_7322_faststats()
5108 * exceeding a threshold, so we need to check the word-counts in qib_get_7322_faststats()
5109 * even if they are 64-bit. in qib_get_7322_faststats()
5113 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); in qib_get_7322_faststats()
5114 traffic_wds -= ppd->dd->traffic_wds; in qib_get_7322_faststats()
5115 ppd->dd->traffic_wds += traffic_wds; in qib_get_7322_faststats()
5116 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); in qib_get_7322_faststats()
5117 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & in qib_get_7322_faststats()
5119 (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | in qib_get_7322_faststats()
5121 ppd->cpspec->qdr_dfe_time && in qib_get_7322_faststats()
5122 time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) { in qib_get_7322_faststats()
5123 ppd->cpspec->qdr_dfe_on = 0; in qib_get_7322_faststats()
5126 ppd->dd->cspec->r1 ? in qib_get_7322_faststats()
5132 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); in qib_get_7322_faststats()
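/*
 * Setup sketch (editorial): the faststats poll re-arms itself via the
 * mod_timer() call above, so it only needs to be primed once, as done
 * later in qib_init_7322_variables():
 *
 *      timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
 */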
5140 if (!dd->cspec->num_msix_entries) in qib_7322_intr_fallback()
5143 qib_devinfo(dd->pcidev, in qib_7322_intr_fallback()
5146 if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0) in qib_7322_intr_fallback()
5158  * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5164 struct qib_devdata *dd = ppd->dd; in qib_7322_mini_pcs_reset()
5171 dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop)); in qib_7322_mini_pcs_reset()
5173 ppd->cpspec->ibcctrl_a & in qib_7322_mini_pcs_reset()
5179 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); in qib_7322_mini_pcs_reset()
5183 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); in qib_7322_mini_pcs_reset()
5187 * This code for non-IBTA-compliant IB speed negotiation is only known to
5189 * with recent firmware. It is based on observed heuristics, rather than
5190 * actual knowledge of the non-compliant speed negotiation.
5191 * It has a number of hard-coded fields, since the hope is to rewrite this
5192  * when a spec is available on how the negotiation is intended to work.
5201 struct qib_devdata *dd = ppd->dd; in autoneg_7322_sendpkt()
5212 /* disable header check on this packet, since it can't be valid */ in autoneg_7322_sendpkt()
5213 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL); in autoneg_7322_sendpkt()
5218 if (dd->flags & QIB_USE_SPCL_TRIG) { in autoneg_7322_sendpkt()
5219 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023; in autoneg_7322_sendpkt()
5226 /* and re-enable hdr check */ in autoneg_7322_sendpkt()
5227 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL); in autoneg_7322_sendpkt()
5235 struct qib_devdata *dd = ppd->dd; in qib_autoneg_7322_send()
5295 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | in set_7322_ibspeed_fast()
5299 if (speed & (speed - 1)) /* multiple speeds */ in set_7322_ibspeed_fast()
5309 if (newctrlb == ppd->cpspec->ibcctrl_b) in set_7322_ibspeed_fast()
5312 ppd->cpspec->ibcctrl_b = newctrlb; in set_7322_ibspeed_fast()
5313 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); in set_7322_ibspeed_fast()
5314 qib_write_kreg(ppd->dd, kr_scratch, 0); in set_7322_ibspeed_fast()
5319 * IB 1.2-compliant device that we think can do DDR.
5321 * 1.2-compliant devices go directly to DDR prior to reaching INIT
5327 spin_lock_irqsave(&ppd->lflags_lock, flags); in try_7322_autoneg()
5328 ppd->lflags |= QIBL_IB_AUTONEG_INPROG; in try_7322_autoneg()
5329 spin_unlock_irqrestore(&ppd->lflags_lock, flags); in try_7322_autoneg()
5334 queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work, in try_7322_autoneg()
5339 * Handle the empirically determined mechanism for auto-negotiation
5349 autoneg_work.work)->ppd; in autoneg_7322_work()
5356 if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState) in autoneg_7322_work()
5364 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) in autoneg_7322_work()
5368 if (wait_event_timeout(ppd->cpspec->autoneg_wait, in autoneg_7322_work()
5369 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), in autoneg_7322_work()
5375 if (wait_event_timeout(ppd->cpspec->autoneg_wait, in autoneg_7322_work()
5376 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), in autoneg_7322_work()
5387 wait_event_timeout(ppd->cpspec->autoneg_wait, in autoneg_7322_work()
5388 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), in autoneg_7322_work()
5391 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) { in autoneg_7322_work()
5392 spin_lock_irqsave(&ppd->lflags_lock, flags); in autoneg_7322_work()
5393 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; in autoneg_7322_work()
5394 if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) { in autoneg_7322_work()
5395 ppd->lflags |= QIBL_IB_AUTONEG_FAILED; in autoneg_7322_work()
5396 ppd->cpspec->autoneg_tries = 0; in autoneg_7322_work()
5398 spin_unlock_irqrestore(&ppd->lflags_lock, flags); in autoneg_7322_work()
5399 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); in autoneg_7322_work()
5409 struct qib_ibport *ibp = &ppd->ibport_data; in try_7322_ipg()
5416 agent = ibp->rvp.send_agent; in try_7322_ipg()
5426 if (!ibp->smi_ah) { in try_7322_ipg()
5433 send_buf->ah = ah; in try_7322_ipg()
5434 ibp->smi_ah = ibah_to_rvtah(ah); in try_7322_ipg()
5438 send_buf->ah = &ibp->smi_ah->ibah; in try_7322_ipg()
5442 smp = send_buf->mad; in try_7322_ipg()
5443 smp->base_version = IB_MGMT_BASE_VERSION; in try_7322_ipg()
5444 smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE; in try_7322_ipg()
5445 smp->class_version = 1; in try_7322_ipg()
5446 smp->method = IB_MGMT_METHOD_SEND; in try_7322_ipg()
5447 smp->hop_cnt = 1; in try_7322_ipg()
5448 smp->attr_id = QIB_VENDOR_IPG; in try_7322_ipg()
5449 smp->attr_mod = 0; in try_7322_ipg()
5456 delay = 2 << ppd->cpspec->ipg_tries; in try_7322_ipg()
5457 queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work, in try_7322_ipg()
5470 ipg_work.work)->ppd; in ipg_7322_work()
5471 if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE)) in ipg_7322_work()
5472 && ++ppd->cpspec->ipg_tries <= 10) in ipg_7322_work()
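/*
 * Note (editorial): the IPG adjustment retries back off
 * exponentially; try_7322_ipg() re-queues its delayed work after
 * (2 << ipg_tries), and the check above abandons the effort once
 * ipg_tries passes 10 while the link remains up.
 */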
5513 spin_lock_irqsave(&ppd->lflags_lock, flags); in qib_7322_ib_updown()
5514 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY; in qib_7322_ib_updown()
5515 spin_unlock_irqrestore(&ppd->lflags_lock, flags); in qib_7322_ib_updown()
5519 ppd->link_speed_active = QIB_IB_QDR; in qib_7322_ib_updown()
5522 ppd->link_speed_active = QIB_IB_DDR; in qib_7322_ib_updown()
5525 ppd->link_speed_active = QIB_IB_SDR; in qib_7322_ib_updown()
5529 ppd->link_width_active = IB_WIDTH_4X; in qib_7322_ib_updown()
5532 ppd->link_width_active = IB_WIDTH_1X; in qib_7322_ib_updown()
5533 ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)]; in qib_7322_ib_updown()
5540 ppd->cpspec->ipg_tries = 0; in qib_7322_ib_updown()
5546 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | in qib_7322_ib_updown()
5548 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); in qib_7322_ib_updown()
5549 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { in qib_7322_ib_updown()
5551 &ppd->cpspec->qsfp_data; in qib_7322_ib_updown()
5557 /* on link down, ensure sane pcs state */ in qib_7322_ib_updown()
5561 if (ppd->dd->flags & QIB_HAS_QSFP) { in qib_7322_ib_updown()
5562 qd->t_insert = jiffies; in qib_7322_ib_updown()
5563 queue_work(ib_wq, &qd->work); in qib_7322_ib_updown()
5565 spin_lock_irqsave(&ppd->sdma_lock, flags); in qib_7322_ib_updown()
5569 spin_unlock_irqrestore(&ppd->sdma_lock, flags); in qib_7322_ib_updown()
5572 if (clr == ppd->cpspec->iblnkdownsnap) in qib_7322_ib_updown()
5573 ppd->cpspec->iblnkdowndelta++; in qib_7322_ib_updown()
5576 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | in qib_7322_ib_updown()
5578 ppd->link_speed_active == QIB_IB_SDR && in qib_7322_ib_updown()
5579 (ppd->link_speed_enabled & QIB_IB_DDR) in qib_7322_ib_updown()
5580 && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) { in qib_7322_ib_updown()
5581 /* we are SDR, and auto-negotiation enabled */ in qib_7322_ib_updown()
5582 ++ppd->cpspec->autoneg_tries; in qib_7322_ib_updown()
5583 if (!ppd->cpspec->ibdeltainprog) { in qib_7322_ib_updown()
5584 ppd->cpspec->ibdeltainprog = 1; in qib_7322_ib_updown()
5585 ppd->cpspec->ibsymdelta += in qib_7322_ib_updown()
5587 crp_ibsymbolerr) - in qib_7322_ib_updown()
5588 ppd->cpspec->ibsymsnap; in qib_7322_ib_updown()
5589 ppd->cpspec->iblnkerrdelta += in qib_7322_ib_updown()
5591 crp_iblinkerrrecov) - in qib_7322_ib_updown()
5592 ppd->cpspec->iblnkerrsnap; in qib_7322_ib_updown()
5596 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && in qib_7322_ib_updown()
5597 ppd->link_speed_active == QIB_IB_SDR) { in qib_7322_ib_updown()
5603 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && in qib_7322_ib_updown()
5604 (ppd->link_speed_active & QIB_IB_DDR)) { in qib_7322_ib_updown()
5605 spin_lock_irqsave(&ppd->lflags_lock, flags); in qib_7322_ib_updown()
5606 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG | in qib_7322_ib_updown()
5608 spin_unlock_irqrestore(&ppd->lflags_lock, flags); in qib_7322_ib_updown()
5609 ppd->cpspec->autoneg_tries = 0; in qib_7322_ib_updown()
5610 /* re-enable SDR, for next link down */ in qib_7322_ib_updown()
5611 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); in qib_7322_ib_updown()
5612 wake_up(&ppd->cpspec->autoneg_wait); in qib_7322_ib_updown()
5614 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) { in qib_7322_ib_updown()
5621 spin_lock_irqsave(&ppd->lflags_lock, flags); in qib_7322_ib_updown()
5622 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; in qib_7322_ib_updown()
5623 spin_unlock_irqrestore(&ppd->lflags_lock, flags); in qib_7322_ib_updown()
5624 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK; in qib_7322_ib_updown()
5627 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { in qib_7322_ib_updown()
5629 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10) in qib_7322_ib_updown()
5631 if (!ppd->cpspec->recovery_init) in qib_7322_ib_updown()
5633 ppd->cpspec->qdr_dfe_time = jiffies + in qib_7322_ib_updown()
5636 ppd->cpspec->ibmalfusesnap = 0; in qib_7322_ib_updown()
5637 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd, in qib_7322_ib_updown()
5641 ppd->cpspec->iblnkdownsnap = in qib_7322_ib_updown()
5643 if (ppd->cpspec->ibdeltainprog) { in qib_7322_ib_updown()
5644 ppd->cpspec->ibdeltainprog = 0; in qib_7322_ib_updown()
5645 ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd, in qib_7322_ib_updown()
5646 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap; in qib_7322_ib_updown()
5647 ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd, in qib_7322_ib_updown()
5648 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap; in qib_7322_ib_updown()
5651 !ppd->cpspec->ibdeltainprog && in qib_7322_ib_updown()
5652 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { in qib_7322_ib_updown()
5653 ppd->cpspec->ibdeltainprog = 1; in qib_7322_ib_updown()
5654 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, in qib_7322_ib_updown()
5656 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd, in qib_7322_ib_updown()
5669 * dir will end up in D48 of extctrl on existing chips).
5681 spin_lock_irqsave(&dd->cspec->gpio_lock, flags); in gpio_7322_mod()
5682 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); in gpio_7322_mod()
5683 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); in gpio_7322_mod()
5684 new_out = (dd->cspec->gpio_out & ~mask) | out; in gpio_7322_mod()
5686 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); in gpio_7322_mod()
5688 dd->cspec->gpio_out = new_out; in gpio_7322_mod()
5689 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); in gpio_7322_mod()
5693 * data on a pin whose direction line was set in the same in gpio_7322_mod()
5695 * that allows us to potentially combine a change on one pin with in gpio_7322_mod()
5696 * a read on another, and because the old code did something like in gpio_7322_mod()
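/*
 * Usage sketch (editorial): drive GPIO pin 3 high while leaving the
 * other pins untouched (mask selects the pin, dir makes it an output,
 * out supplies the value), or pass all zeroes to sample the pins
 * without changing anything:
 *
 *      gpio_7322_mod(dd, 1 << 3, 1 << 3, 1 << 3);
 *      pins = gpio_7322_mod(dd, 0, 0, 0);
 */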
5727 dd->palign = qib_read_kreg32(dd, kr_pagealign); in get_7322_chip_params()
5729 dd->uregbase = qib_read_kreg32(dd, kr_userregbase); in get_7322_chip_params()
5731 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); in get_7322_chip_params()
5732 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); in get_7322_chip_params()
5733 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); in get_7322_chip_params()
5734 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); in get_7322_chip_params()
5735 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; in get_7322_chip_params()
5738 dd->piobcnt2k = val & ~0U; in get_7322_chip_params()
5739 dd->piobcnt4k = val >> 32; in get_7322_chip_params()
5741 dd->piosize2k = val & ~0U; in get_7322_chip_params()
5742 dd->piosize4k = val >> 32; in get_7322_chip_params()
5745 if (mtu == -1) in get_7322_chip_params()
5747 dd->pport[0].ibmtu = (u32)mtu; in get_7322_chip_params()
5748 dd->pport[1].ibmtu = (u32)mtu; in get_7322_chip_params()
5751 dd->pio2kbase = (u32 __iomem *) in get_7322_chip_params()
5752 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase); in get_7322_chip_params()
5753 dd->pio4kbase = (u32 __iomem *) in get_7322_chip_params()
5754 ((char __iomem *) dd->kregbase + in get_7322_chip_params()
5755 (dd->piobufbase >> 32)); in get_7322_chip_params()
5758 * paranoid; we calculate it once here, rather than on in get_7322_chip_params()
5761 dd->align4k = ALIGN(dd->piosize4k, dd->palign); in get_7322_chip_params()
5763 piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS; in get_7322_chip_params()
5765 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / in get_7322_chip_params()
5780 dd->cspec->cregbase = (u64 __iomem *)(cregbase + in qib_7322_set_baseaddrs()
5781 (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5783 dd->egrtidbase = (u64 __iomem *) in qib_7322_set_baseaddrs()
5784 ((char __iomem *) dd->kregbase + dd->rcvegrbase); in qib_7322_set_baseaddrs()
5787 dd->pport[0].cpspec->kpregbase = in qib_7322_set_baseaddrs()
5788 (u64 __iomem *)((char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5789 dd->pport[1].cpspec->kpregbase = in qib_7322_set_baseaddrs()
5790 (u64 __iomem *)(dd->palign + in qib_7322_set_baseaddrs()
5791 (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5792 dd->pport[0].cpspec->cpregbase = in qib_7322_set_baseaddrs()
5793 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0], in qib_7322_set_baseaddrs()
5794 kr_counterregbase) + (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5795 dd->pport[1].cpspec->cpregbase = in qib_7322_set_baseaddrs()
5796 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1], in qib_7322_set_baseaddrs()
5797 kr_counterregbase) + (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5801 * This is a fairly special-purpose observer, so we only support
5802 * the port-specific parts of SendCtrl
5829 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in sendctrl_hook()
5833 ppd = dd->pport + pidx; in sendctrl_hook()
5834 if (!ppd->cpspec->kpregbase) in sendctrl_hook()
5837 psptr = ppd->cpspec->kpregbase + krp_sendctrl; in sendctrl_hook()
5838 psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr); in sendctrl_hook()
5844 if (pidx >= dd->num_pports) in sendctrl_hook()
5854 spin_lock_irqsave(&dd->sendctrl_lock, flags); in sendctrl_hook()
5859 * reg or shadow. First-cut: read reg, and complain in sendctrl_hook()
5881 sval = ppd->p_sendctrl & ~mask; in sendctrl_hook()
5883 ppd->p_sendctrl = sval; in sendctrl_hook()
5890 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in sendctrl_hook()
5921 ppd = qd->ppd; in qsfp_7322_event()
5922 pwrup = qd->t_insert + in qsfp_7322_event()
5923 msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC); in qsfp_7322_event()
5929 ppd->cpspec->qsfp_data.modpresent = 0; in qsfp_7322_event()
5933 spin_lock_irqsave(&ppd->lflags_lock, flags); in qsfp_7322_event()
5934 ppd->lflags &= ~QIBL_LINKV; in qsfp_7322_event()
5935 spin_unlock_irqrestore(&ppd->lflags_lock, flags); in qsfp_7322_event()
5938  * Some QSFPs not only do not respond until the full power-up in qsfp_7322_event()
5948 ret = qib_refresh_qsfp_cache(ppd, &qd->cache); in qsfp_7322_event()
5953 * even on failure to read cable information. We don't in qsfp_7322_event()
5956 if (!ret && !ppd->dd->cspec->r1) { in qsfp_7322_event()
5957 if (QSFP_IS_ACTIVE_FAR(qd->cache.tech)) in qsfp_7322_event()
5959 else if (qd->cache.atten[1] >= qib_long_atten && in qsfp_7322_event()
5960 QSFP_IS_CU(qd->cache.tech)) in qsfp_7322_event()
5974 /* The physical link is being re-enabled only when the in qsfp_7322_event()
5978 if (!ppd->cpspec->qsfp_data.modpresent && in qsfp_7322_event()
5979 (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) { in qsfp_7322_event()
5980 ppd->cpspec->qsfp_data.modpresent = 1; in qsfp_7322_event()
5983 spin_lock_irqsave(&ppd->lflags_lock, flags); in qsfp_7322_event()
5984 ppd->lflags |= QIBL_LINKV; in qsfp_7322_event()
5985 spin_unlock_irqrestore(&ppd->lflags_lock, flags); in qsfp_7322_event()
5997 struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data; in qib_init_7322_qsfp()
5998 struct qib_devdata *dd = ppd->dd; in qib_init_7322_qsfp()
6001 mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); in qib_init_7322_qsfp()
6002 qd->ppd = ppd; in qib_init_7322_qsfp()
6004 spin_lock_irqsave(&dd->cspec->gpio_lock, flags); in qib_init_7322_qsfp()
6005 dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert)); in qib_init_7322_qsfp()
6006 dd->cspec->gpio_mask |= mod_prs_bit; in qib_init_7322_qsfp()
6007 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); in qib_init_7322_qsfp()
6008 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); in qib_init_7322_qsfp()
6009 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); in qib_init_7322_qsfp()
6023 * one is the one that winds up set); if none at all, fall back on default.
6037 for (pidx = 0; pidx < dd->num_pports; ++pidx) in set_no_qsfp_atten()
6038 dd->pport[pidx].cpspec->no_eep = deflt; in set_no_qsfp_atten()
6079 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; in set_no_qsfp_atten()
6081 struct qib_pportdata *ppd = &dd->pport[pidx]; in set_no_qsfp_atten()
6083 if (ppd->port != port || !ppd->link_speed_supported) in set_no_qsfp_atten()
6085 ppd->cpspec->no_eep = val; in set_no_qsfp_atten()
6087 ppd->cpspec->h1_val = h1; in set_no_qsfp_atten()
6090 /* Re-enable the physical state machine on mezz boards in set_no_qsfp_atten()
6106 for (pidx = 0; pidx < dd->num_pports; ++pidx) in set_no_qsfp_atten()
6107 if (dd->pport[pidx].link_speed_supported) in set_no_qsfp_atten()
6108 init_txdds_table(&dd->pport[pidx], 0); in set_no_qsfp_atten()
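/*
 * Note (editorial): the txselect parsing above applies the leading
 * default value to every port, then lets entries of the form
 * "unit:port=val" (optionally carrying an H1 override) replace it for
 * a specific port; the last matching entry wins, and the txdds tables
 * are re-initialized so the new indices take effect.
 */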
6121 return -ENOSPC; in setup_txselect()
6128 return -EINVAL; in setup_txselect()
6130 strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1); in setup_txselect()
6133 if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) in setup_txselect()
6139 * Write the final few registers that depend on some of the
6148 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); in qib_late_7322_initreg()
6149 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); in qib_late_7322_initreg()
6150 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); in qib_late_7322_initreg()
6151 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); in qib_late_7322_initreg()
6153 if (val != dd->pioavailregs_phys) { in qib_late_7322_initreg()
6156 (unsigned long) dd->pioavailregs_phys, in qib_late_7322_initreg()
6158 ret = -EINVAL; in qib_late_7322_initreg()
6161 n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; in qib_late_7322_initreg()
6169 dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN; in qib_late_7322_initreg()
6170 qib_write_kreg(dd, kr_control, dd->control); in qib_late_7322_initreg()
6173 * QSFP handler on boards that have QSFP. in qib_late_7322_initreg()
6178 for (n = 0; n < dd->num_pports; ++n) { in qib_late_7322_initreg()
6179 struct qib_pportdata *ppd = dd->pport + n; in qib_late_7322_initreg()
6183 /* Initialize qsfp if present on board. */ in qib_late_7322_initreg()
6184 if (dd->flags & QIB_HAS_QSFP) in qib_late_7322_initreg()
6187 dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN; in qib_late_7322_initreg()
6188 qib_write_kreg(dd, kr_control, dd->control); in qib_late_7322_initreg()
6202 * Write the initialization per-port registers that need to be done at
6205 * Some of these should be redundant on reset, but play safe.
6212 if (!ppd->link_speed_supported) { in write_7322_init_portregs()
6217 qib_write_kreg(ppd->dd, kr_scratch, 0); in write_7322_init_portregs()
6223 * for flow control packet handling on unsupported VLs in write_7322_init_portregs()
6227 val |= (u64)(ppd->vls_supported - 1) << in write_7322_init_portregs()
6248 if (ppd->dd->cspec->r1) in write_7322_init_portregs()
6249 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate); in write_7322_init_portregs()
6253 * Write the initialization per-device registers that need to be done at
6255 * of other init procedures called from qib_init.c). Also write per-port
6257 * Some of these should be redundant on reset, but play safe.
6268 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in write_7322_initregs()
6272 if (dd->n_krcv_queues < 2 || in write_7322_initregs()
6273 !dd->pport[pidx].link_speed_supported) in write_7322_initregs()
6276 ppd = &dd->pport[pidx]; in write_7322_initregs()
6278 /* be paranoid against later code motion, etc. */ in write_7322_initregs()
6279 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); in write_7322_initregs()
6280 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable); in write_7322_initregs()
6281 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); in write_7322_initregs()
6286 if (dd->num_pports > 1) in write_7322_initregs()
6287 n = dd->first_user_ctxt / dd->num_pports; in write_7322_initregs()
6289 n = dd->first_user_ctxt - 1; in write_7322_initregs()
6293 if (dd->num_pports > 1) in write_7322_initregs()
6294 ctxt = (i % n) * dd->num_pports + pidx; in write_7322_initregs()
6298 ctxt = ppd->hw_pidx; in write_7322_initregs()
6316 for (i = 0; i < dd->first_user_ctxt; i++) { in write_7322_initregs()
6317 dd->cspec->rcvavail_timeout[i] = rcv_int_timeout; in write_7322_initregs()
6327 for (i = 0; i < dd->cfgctxts; i++) { in write_7322_initregs()
6339 if (dd->num_pports) in write_7322_initregs()
6340 setup_7322_link_recovery(dd->pport, dd->num_pports > 1); in write_7322_initregs()
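/*
 * Illustrative mapping (editorial) for the RcvQPMapTable setup above,
 * dual-port case: with n kernel contexts per port, table entry i for
 * port p selects context (i % n) * num_pports + p, so port 0 lands on
 * even-numbered kernel contexts and port 1 on odd ones.
 */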
6353 dd->pport = ppd; in qib_init_7322_variables()
6357 dd->cspec = (struct qib_chip_specific *)(ppd + 2); in qib_init_7322_variables()
6359 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1); in qib_init_7322_variables()
6361 ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */ in qib_init_7322_variables()
6362 ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */ in qib_init_7322_variables()
6364 spin_lock_init(&dd->cspec->rcvmod_lock); in qib_init_7322_variables()
6365 spin_lock_init(&dd->cspec->gpio_lock); in qib_init_7322_variables()
6368 dd->revision = readq(&dd->kregbase[kr_revision]); in qib_init_7322_variables()
6370 if ((dd->revision & 0xffffffffU) == 0xffffffffU) { in qib_init_7322_variables()
6373 ret = -ENODEV; in qib_init_7322_variables()
6376 dd->flags |= QIB_PRESENT; /* now register routines work */ in qib_init_7322_variables()
6378 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor); in qib_init_7322_variables()
6379 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor); in qib_init_7322_variables()
6380 dd->cspec->r1 = dd->minrev == 1; in qib_init_7322_variables()
6386 sbufcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; in qib_init_7322_variables()
6388 dd->cspec->sendchkenable = bitmap_zalloc(sbufcnt, GFP_KERNEL); in qib_init_7322_variables()
6389 dd->cspec->sendgrhchk = bitmap_zalloc(sbufcnt, GFP_KERNEL); in qib_init_7322_variables()
6390 dd->cspec->sendibchk = bitmap_zalloc(sbufcnt, GFP_KERNEL); in qib_init_7322_variables()
6391 if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk || in qib_init_7322_variables()
6392 !dd->cspec->sendibchk) { in qib_init_7322_variables()
6393 ret = -ENOMEM; in qib_init_7322_variables()
6397 ppd = dd->pport; in qib_init_7322_variables()
6403 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; in qib_init_7322_variables()
6404 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; in qib_init_7322_variables()
6405 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; in qib_init_7322_variables()
6407 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | in qib_init_7322_variables()
6411 dd->flags |= qib_special_trigger ? in qib_init_7322_variables()
6421 if (mtu == -1) in qib_init_7322_variables()
6424 dd->cspec->int_enable_mask = QIB_I_BITSEXTANT; in qib_init_7322_variables()
6426 dd->cspec->hwerrmask = ~0ULL; in qib_init_7322_variables()
6429 dd->cspec->hwerrmask &= in qib_init_7322_variables()
6435 struct qib_chippport_specific *cp = ppd->cpspec; in qib_init_7322_variables()
6437 ppd->link_speed_supported = features & PORT_SPD_CAP; in qib_init_7322_variables()
6439 if (!ppd->link_speed_supported) { in qib_init_7322_variables()
6441 dd->skip_kctxt_mask |= 1 << pidx; in qib_init_7322_variables()
6447 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, in qib_init_7322_variables()
6451 dd->cspec->int_enable_mask &= ~( in qib_init_7322_variables()
6462 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, in qib_init_7322_variables()
6466 dd->cspec->int_enable_mask &= ~( in qib_init_7322_variables()
6477 dd->num_pports++; in qib_init_7322_variables()
6478 ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports); in qib_init_7322_variables()
6480 dd->num_pports--; in qib_init_7322_variables()
6484 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; in qib_init_7322_variables()
6485 ppd->link_width_enabled = IB_WIDTH_4X; in qib_init_7322_variables()
6486 ppd->link_speed_enabled = ppd->link_speed_supported; in qib_init_7322_variables()
6491 ppd->link_width_active = IB_WIDTH_4X; in qib_init_7322_variables()
6492 ppd->link_speed_active = QIB_IB_SDR; in qib_init_7322_variables()
6493 ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS]; in qib_init_7322_variables()
6496 ppd->vls_supported = IB_VL_VL0; in qib_init_7322_variables()
6499 ppd->vls_supported = IB_VL_VL0_1; in qib_init_7322_variables()
6502 qib_devinfo(dd->pcidev, in qib_init_7322_variables()
6508 ppd->vls_supported = IB_VL_VL0_3; in qib_init_7322_variables()
6512 ppd->vls_supported = IB_VL_VL0_7; in qib_init_7322_variables()
6514 qib_devinfo(dd->pcidev, in qib_init_7322_variables()
6517 ppd->vls_supported = IB_VL_VL0_3; in qib_init_7322_variables()
6522 ppd->vls_operational = ppd->vls_supported; in qib_init_7322_variables()
6524 init_waitqueue_head(&cp->autoneg_wait); in qib_init_7322_variables()
6525 INIT_DELAYED_WORK(&cp->autoneg_work, in qib_init_7322_variables()
6527 if (ppd->dd->cspec->r1) in qib_init_7322_variables()
6528 INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work); in qib_init_7322_variables()
6533 * in adapter-specific routines. in qib_init_7322_variables()
6535 if (!(dd->flags & QIB_HAS_QSFP)) { in qib_init_7322_variables()
6537 qib_devinfo(dd->pcidev, in qib_init_7322_variables()
6539 dd->unit, ppd->port); in qib_init_7322_variables()
6540 cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; in qib_init_7322_variables()
6545 ppd->cpspec->no_eep = IS_QMH(dd) ? in qib_init_7322_variables()
6548 cp->h1_val = H1_FORCE_VAL; in qib_init_7322_variables()
6554 timer_setup(&cp->chase_timer, reenable_chase, 0); in qib_init_7322_variables()
6559 dd->rcvhdrentsize = qib_rcvhdrentsize ? in qib_init_7322_variables()
6561 dd->rcvhdrsize = qib_rcvhdrsize ? in qib_init_7322_variables()
6563 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); in qib_init_7322_variables()
6566 dd->rcvegrbufsize = max(mtu, 2048); in qib_init_7322_variables()
6567 dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); in qib_init_7322_variables()
6575 dd->rhdrhead_intr_off = in qib_init_7322_variables()
6579 timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0); in qib_init_7322_variables()
6581 dd->ureg_align = 0x10000; /* 64KB alignment */ in qib_init_7322_variables()
6583 dd->piosize2kmax_dwords = dd->piosize2k >> 2; in qib_init_7322_variables()
6589 * We do not set WC on the VL15 buffers to avoid in qib_init_7322_variables()
6591 * interrupt-flushed store buffers, so we need in qib_init_7322_variables()
6600 vl15off = dd->physaddr + (dd->piobufbase >> 32) + in qib_init_7322_variables()
6601 dd->piobcnt4k * dd->align4k; in qib_init_7322_variables()
6602 dd->piovl15base = ioremap(vl15off, in qib_init_7322_variables()
6603 NUM_VL15_BUFS * dd->align4k); in qib_init_7322_variables()
6604 if (!dd->piovl15base) { in qib_init_7322_variables()
6605 ret = -ENOMEM; in qib_init_7322_variables()
6614 if (!dd->num_pports) { in qib_init_7322_variables()
6635 if (dd->flags & QIB_HAS_SEND_DMA) { in qib_init_7322_variables()
6636 dd->cspec->sdmabufcnt = dd->piobcnt4k; in qib_init_7322_variables()
6639 dd->cspec->sdmabufcnt = 0; in qib_init_7322_variables()
6640 sbufs = dd->piobcnt4k; in qib_init_7322_variables()
6642 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k - in qib_init_7322_variables()
6643 dd->cspec->sdmabufcnt; in qib_init_7322_variables()
6644 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; in qib_init_7322_variables()
6645 dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ in qib_init_7322_variables()
6646 dd->last_pio = dd->cspec->lastbuf_for_pio; in qib_init_7322_variables()
6647 dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ? in qib_init_7322_variables()
6648 dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0; in qib_init_7322_variables()
6656 if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh) in qib_init_7322_variables()
6657 updthresh = dd->pbufsctxt - 2; in qib_init_7322_variables()
6658 dd->cspec->updthresh_dflt = updthresh; in qib_init_7322_variables()
6659 dd->cspec->updthresh = updthresh; in qib_init_7322_variables()
6662 dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) in qib_init_7322_variables()
6666 dd->psxmitwait_supported = 1; in qib_init_7322_variables()
6667 dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE; in qib_init_7322_variables()
6669 if (!dd->ctxtcnt) in qib_init_7322_variables()
6670 dd->ctxtcnt = 1; /* for other initialization code */ in qib_init_7322_variables()
6679 struct qib_devdata *dd = ppd->dd; in qib_7322_getsendbuf()
6683 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx; in qib_7322_getsendbuf()
6686 if ((plen + 1) > dd->piosize2kmax_dwords) in qib_7322_getsendbuf()
6687 first = dd->piobcnt2k; in qib_7322_getsendbuf()
6690 last = dd->cspec->lastbuf_for_pio; in qib_7322_getsendbuf()
6718 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6722 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6726 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6732 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6736 /* get bufuse bits, clear them, and print them again if non-zero */ in dump_sdma_7322_state()
6744 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6751 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6756 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6760 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6764 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6768 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6772 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6776 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6780 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6784 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6839 ppd->sdma_state.set_state_action = sdma_7322_action_table; in qib_7322_sdma_init_early()
6844 struct qib_devdata *dd = ppd->dd; in init_sdma_7322_regs()
6849 qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys); in init_sdma_7322_regs()
6854 qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys); in init_sdma_7322_regs()
6856 if (dd->num_pports) in init_sdma_7322_regs()
6857 n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */ in init_sdma_7322_regs()
6859 n = dd->cspec->sdmabufcnt; /* failsafe for init */ in init_sdma_7322_regs()
6860 erstbuf = (dd->piobcnt2k + dd->piobcnt4k) - in init_sdma_7322_regs()
6861 ((dd->num_pports == 1 || ppd->port == 2) ? n : in init_sdma_7322_regs()
6862 dd->cspec->sdmabufcnt); in init_sdma_7322_regs()
6865 ppd->sdma_state.first_sendbuf = erstbuf; in init_sdma_7322_regs()
6866 ppd->sdma_state.last_sendbuf = lastbuf; in init_sdma_7322_regs()
6869 unsigned bit = erstbuf & (BITS_PER_LONG - 1); in init_sdma_7322_regs()
6882 struct qib_devdata *dd = ppd->dd; in qib_sdma_7322_gethead()
6891 (dd->flags & QIB_HAS_SDMA_TIMEOUT); in qib_sdma_7322_gethead()
6894 (u16) le64_to_cpu(*ppd->sdma_head_dma) : in qib_sdma_7322_gethead()
6897 swhead = ppd->sdma_descq_head; in qib_sdma_7322_gethead()
6898 swtail = ppd->sdma_descq_tail; in qib_sdma_7322_gethead()
6899 cnt = ppd->sdma_descq_cnt; in qib_sdma_7322_gethead()
6939  * based on the length of this packet.
6944 u8 snd_mult = ppd->delay_mult; in qib_7322_setpbc_control()
6955 ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB; in qib_7322_setpbc_control()
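/*
 * Note (editorial): the control word assembled above tells the send
 * hardware everything it needs per packet: a static-rate delay
 * multiplier, the virtual lane, and (via PBC_PORT_SEL_LSB) which of
 * the two IB ports the buffer goes out on.
 */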
6961 * Enable the per-port VL15 send buffers for use.
6971 vl15bufs = dd->piobcnt2k + dd->piobcnt4k; in qib_7322_initvl15_bufs()
6978 if (rcd->ctxt < NUM_IB_PORTS) { in qib_7322_init_ctxt()
6979 if (rcd->dd->num_pports > 1) { in qib_7322_init_ctxt()
6980 rcd->rcvegrcnt = KCTXT0_EGRCNT / 2; in qib_7322_init_ctxt()
6981 rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0; in qib_7322_init_ctxt()
6983 rcd->rcvegrcnt = KCTXT0_EGRCNT; in qib_7322_init_ctxt()
6984 rcd->rcvegr_tid_base = 0; in qib_7322_init_ctxt()
6987 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; in qib_7322_init_ctxt()
6988 rcd->rcvegr_tid_base = KCTXT0_EGRCNT + in qib_7322_init_ctxt()
6989 (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt; in qib_7322_init_ctxt()
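/*
 * Illustrative layout (editorial) of the eager-TID partitioning
 * above, dual-port case: the two kernel contexts split KCTXT0_EGRCNT
 * between them at the start of the array, and each user context i
 * (i >= NUM_IB_PORTS) gets cspec->rcvegrcnt entries starting at
 * KCTXT0_EGRCNT + (i - NUM_IB_PORTS) * rcvegrcnt.
 */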
6998 const int last = start + len - 1; in qib_7322_txchk_change()
7006 int cstart, previ = -1; in qib_7322_txchk_change()
7021 le64_to_cpu(dd->pioavailregs_dma[i]); in qib_7322_txchk_change()
7036 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_7322_txchk_change()
7044 * disable checking on a range; used by diags; just in qib_7322_txchk_change()
7048 clear_bit(i, dd->cspec->sendchkenable); in qib_7322_txchk_change()
7053 * (re)enable checking on a range; used by diags; just in qib_7322_txchk_change()
7060 set_bit(i, dd->cspec->sendchkenable); in qib_7322_txchk_change()
7066 set_bit(i, dd->cspec->sendibchk); in qib_7322_txchk_change()
7067 clear_bit(i, dd->cspec->sendgrhchk); in qib_7322_txchk_change()
7069 spin_lock_irqsave(&dd->uctxt_lock, flags); in qib_7322_txchk_change()
7071 for (i = dd->first_user_ctxt; in qib_7322_txchk_change()
7072 dd->cspec->updthresh != dd->cspec->updthresh_dflt in qib_7322_txchk_change()
7073 && i < dd->cfgctxts; i++) in qib_7322_txchk_change()
7074 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt && in qib_7322_txchk_change()
7075 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1) in qib_7322_txchk_change()
7076 < dd->cspec->updthresh_dflt) in qib_7322_txchk_change()
7078 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in qib_7322_txchk_change()
7079 if (i == dd->cfgctxts) { in qib_7322_txchk_change()
7080 spin_lock_irqsave(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7081 dd->cspec->updthresh = dd->cspec->updthresh_dflt; in qib_7322_txchk_change()
7082 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); in qib_7322_txchk_change()
7083 dd->sendctrl |= (dd->cspec->updthresh & in qib_7322_txchk_change()
7086 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7087 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_7322_txchk_change()
7094 clear_bit(i, dd->cspec->sendibchk); in qib_7322_txchk_change()
7095 set_bit(i, dd->cspec->sendgrhchk); in qib_7322_txchk_change()
7097 spin_lock_irqsave(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7098 if (rcd && rcd->subctxt_cnt && ((rcd->piocnt in qib_7322_txchk_change()
7099 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) { in qib_7322_txchk_change()
7100 dd->cspec->updthresh = (rcd->piocnt / in qib_7322_txchk_change()
7101 rcd->subctxt_cnt) - 1; in qib_7322_txchk_change()
7102 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); in qib_7322_txchk_change()
7103 dd->sendctrl |= (dd->cspec->updthresh & in qib_7322_txchk_change()
7106 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7107 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_7322_txchk_change()
7109 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7118 dd->cspec->sendchkenable[i]); in qib_7322_txchk_change()
7122 dd->cspec->sendgrhchk[i]); in qib_7322_txchk_change()
7124 dd->cspec->sendibchk[i]); in qib_7322_txchk_change()
7144 return -ENXIO; in qib_7322_tempsense_rd()
7148 * qib_init_iba7322_funcs - set up the chip-specific function pointers
7156 * chip-specific function pointers for later use.
7172 dd->f_bringup_serdes = qib_7322_bringup_serdes; in qib_init_iba7322_funcs()
7173 dd->f_cleanup = qib_setup_7322_cleanup; in qib_init_iba7322_funcs()
7174 dd->f_clear_tids = qib_7322_clear_tids; in qib_init_iba7322_funcs()
7175 dd->f_free_irq = qib_7322_free_irq; in qib_init_iba7322_funcs()
7176 dd->f_get_base_info = qib_7322_get_base_info; in qib_init_iba7322_funcs()
7177 dd->f_get_msgheader = qib_7322_get_msgheader; in qib_init_iba7322_funcs()
7178 dd->f_getsendbuf = qib_7322_getsendbuf; in qib_init_iba7322_funcs()
7179 dd->f_gpio_mod = gpio_7322_mod; in qib_init_iba7322_funcs()
7180 dd->f_eeprom_wen = qib_7322_eeprom_wen; in qib_init_iba7322_funcs()
7181 dd->f_hdrqempty = qib_7322_hdrqempty; in qib_init_iba7322_funcs()
7182 dd->f_ib_updown = qib_7322_ib_updown; in qib_init_iba7322_funcs()
7183 dd->f_init_ctxt = qib_7322_init_ctxt; in qib_init_iba7322_funcs()
7184 dd->f_initvl15_bufs = qib_7322_initvl15_bufs; in qib_init_iba7322_funcs()
7185 dd->f_intr_fallback = qib_7322_intr_fallback; in qib_init_iba7322_funcs()
7186 dd->f_late_initreg = qib_late_7322_initreg; in qib_init_iba7322_funcs()
7187 dd->f_setpbc_control = qib_7322_setpbc_control; in qib_init_iba7322_funcs()
7188 dd->f_portcntr = qib_portcntr_7322; in qib_init_iba7322_funcs()
7189 dd->f_put_tid = qib_7322_put_tid; in qib_init_iba7322_funcs()
7190 dd->f_quiet_serdes = qib_7322_mini_quiet_serdes; in qib_init_iba7322_funcs()
7191 dd->f_rcvctrl = rcvctrl_7322_mod; in qib_init_iba7322_funcs()
7192 dd->f_read_cntrs = qib_read_7322cntrs; in qib_init_iba7322_funcs()
7193 dd->f_read_portcntrs = qib_read_7322portcntrs; in qib_init_iba7322_funcs()
7194 dd->f_reset = qib_do_7322_reset; in qib_init_iba7322_funcs()
7195 dd->f_init_sdma_regs = init_sdma_7322_regs; in qib_init_iba7322_funcs()
7196 dd->f_sdma_busy = qib_sdma_7322_busy; in qib_init_iba7322_funcs()
7197 dd->f_sdma_gethead = qib_sdma_7322_gethead; in qib_init_iba7322_funcs()
7198 dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl; in qib_init_iba7322_funcs()
7199 dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt; in qib_init_iba7322_funcs()
7200 dd->f_sdma_update_tail = qib_sdma_update_7322_tail; in qib_init_iba7322_funcs()
7201 dd->f_sendctrl = sendctrl_7322_mod; in qib_init_iba7322_funcs()
7202 dd->f_set_armlaunch = qib_set_7322_armlaunch; in qib_init_iba7322_funcs()
7203 dd->f_set_cntr_sample = qib_set_cntr_7322_sample; in qib_init_iba7322_funcs()
7204 dd->f_iblink_state = qib_7322_iblink_state; in qib_init_iba7322_funcs()
7205 dd->f_ibphys_portstate = qib_7322_phys_portstate; in qib_init_iba7322_funcs()
7206 dd->f_get_ib_cfg = qib_7322_get_ib_cfg; in qib_init_iba7322_funcs()
7207 dd->f_set_ib_cfg = qib_7322_set_ib_cfg; in qib_init_iba7322_funcs()
7208 dd->f_set_ib_loopback = qib_7322_set_loopback; in qib_init_iba7322_funcs()
7209 dd->f_get_ib_table = qib_7322_get_ib_table; in qib_init_iba7322_funcs()
7210 dd->f_set_ib_table = qib_7322_set_ib_table; in qib_init_iba7322_funcs()
7211 dd->f_set_intr_state = qib_7322_set_intr_state; in qib_init_iba7322_funcs()
7212 dd->f_setextled = qib_setup_7322_setextled; in qib_init_iba7322_funcs()
7213 dd->f_txchk_change = qib_7322_txchk_change; in qib_init_iba7322_funcs()
7214 dd->f_update_usrhead = qib_update_7322_usrhead; in qib_init_iba7322_funcs()
7215 dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr; in qib_init_iba7322_funcs()
7216 dd->f_xgxs_reset = qib_7322_mini_pcs_reset; in qib_init_iba7322_funcs()
7217 dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up; in qib_init_iba7322_funcs()
7218 dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up; in qib_init_iba7322_funcs()
7219 dd->f_sdma_init_early = qib_7322_sdma_init_early; in qib_init_iba7322_funcs()
7220 dd->f_writescratch = writescratch; in qib_init_iba7322_funcs()
7221 dd->f_tempsense_rd = qib_7322_tempsense_rd; in qib_init_iba7322_funcs()
7223 dd->f_notify_dca = qib_7322_notify_dca; in qib_init_iba7322_funcs()
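/*
 * Note (editorial): the assignments above are the whole chip
 * abstraction; the chip-independent core drives this hardware only
 * through these dd->f_* pointers, so 7322 support amounts to filling
 * in this table plus the per-chip initialization below.
 */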
7228 * On return, we have the chip mapped, but chip registers in qib_init_iba7322_funcs()
7235 /* initialize chip-specific variables */ in qib_init_iba7322_funcs()
7240 if (qib_mini_init || !dd->num_pports) in qib_init_iba7322_funcs()
7244 * Determine number of vectors we want; depends on port count in qib_init_iba7322_funcs()
7246 * Should also depend on whether sdma is enabled or not, but in qib_init_iba7322_funcs()
7249 tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table); in qib_init_iba7322_funcs()
7252 irq_table[i].port <= dd->num_pports) || in qib_init_iba7322_funcs()
7254 dd->rcd[i - ARRAY_SIZE(irq_table)])) in qib_init_iba7322_funcs()
7258 actual_cnt -= dd->num_pports; in qib_init_iba7322_funcs()
7261 dd->cspec->msix_entries = kcalloc(tabsize, in qib_init_iba7322_funcs()
7264 if (!dd->cspec->msix_entries) in qib_init_iba7322_funcs()
7271 dd->cspec->num_msix_entries = tabsize; in qib_init_iba7322_funcs()
7279 if (!dca_add_requester(&pdev->dev)) { in qib_init_iba7322_funcs()
7280 qib_devinfo(dd->pcidev, "DCA enabled\n"); in qib_init_iba7322_funcs()
7281 dd->flags |= QIB_DCA_ENABLED; in qib_init_iba7322_funcs()
7317 struct qib_devdata *dd = ppd->dd; in set_txdds()
7321 /* Get correct offset in chip-space, and in source table */ in set_txdds()
7328 if (ppd->hw_pidx) in set_txdds()
7329 regidx += (dd->palign / sizeof(u64)); in set_txdds()
7331 pack_ent = tp->amp << DDS_ENT_AMP_LSB; in set_txdds()
7332 pack_ent |= tp->main << DDS_ENT_MAIN_LSB; in set_txdds()
7333 pack_ent |= tp->pre << DDS_ENT_PRE_LSB; in set_txdds()
7334 pack_ent |= tp->post << DDS_ENT_POST_LSB; in set_txdds()
7336 /* Prevent back-to-back writes by hitting scratch */ in set_txdds()
7337 qib_write_kreg(ppd->dd, kr_scratch, 0); in set_txdds()
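/*
 * Minimal sketch (editorial, hypothetical helper name) of the DDS
 * packing done by set_txdds() above: amplitude, main tap, and the
 * pre- and post-cursor emphasis values each occupy one field of a
 * single register word.
 */
static inline u64 pack_dds_ent_example(const struct txdds_ent *tp)
{
        return ((u64)tp->amp << DDS_ENT_AMP_LSB) |
               ((u64)tp->main << DDS_ENT_MAIN_LSB) |
               ((u64)tp->pre << DDS_ENT_PRE_LSB) |
               ((u64)tp->post << DDS_ENT_POST_LSB);
}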
7350 { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7354 { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7362 { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
7366 { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
7370 { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
7374 { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
7378 { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
7382 { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
7386 { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
7390 { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
7418 { 0x00, 0x09, 0x3A }, "74763-0025 ",
7422 { 0x00, 0x09, 0x3A }, "74757-2201 ",
7575 atten = TXDDS_TABLE_SZ - 1; in get_atten_table()
7577 atten--; in get_atten_table()
7590 struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache; in find_best_ent()
7597 if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) && in find_best_ent()
7598 (!v->partnum || in find_best_ent()
7599 !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) { in find_best_ent()
7600 *sdr_dds = &v->sdr; in find_best_ent()
7601 *ddr_dds = &v->ddr; in find_best_ent()
7602 *qdr_dds = &v->qdr; in find_best_ent()
7609 if (!override && QSFP_IS_ACTIVE(qd->tech)) { in find_best_ent()
7610 *sdr_dds = txdds_sdr + ppd->dd->board_atten; in find_best_ent()
7611 *ddr_dds = txdds_ddr + ppd->dd->board_atten; in find_best_ent()
7612 *qdr_dds = txdds_qdr + ppd->dd->board_atten; in find_best_ent()
7616 if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] || in find_best_ent()
7617 qd->atten[1])) { in find_best_ent()
7618 *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]); in find_best_ent()
7619 *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]); in find_best_ent()
7620 *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]); in find_best_ent()
7622 } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) { in find_best_ent()
7629 idx = ppd->cpspec->no_eep; in find_best_ent()
7633 } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { in find_best_ent()
7635 idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ; in find_best_ent()
7639 } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) && in find_best_ent()
7640 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + in find_best_ent()
7642 idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); in find_best_ent()
7644 ppd->dd->unit, ppd->port, idx); in find_best_ent()
7666 if (!(ppd->dd->flags & QIB_HAS_QSFP) || override) in init_txdds_table()
7673 if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | in init_txdds_table()
7675 dds = (struct txdds_ent *)(ppd->link_speed_active == in init_txdds_table()
7677 (ppd->link_speed_active == in init_txdds_table()
7714 /* From this point on, make sure we return access */ in ahb_mod()
7731 sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1; in ahb_mod()
7747 /* Re-read in case host split reads and read data first */ in ahb_mod()
7780 struct qib_devdata *dd = ppd->dd; in ibsd_wr_allchans()
7784 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, in ibsd_wr_allchans()
7786 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, in ibsd_wr_allchans()
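/*
 * Note (editorial): the (chan + (chan >> 1)) addressing used above
 * maps logical SerDes channels 0..3 onto AHB channel numbers
 * 0, 1, 3 and 4, skipping slot 2 in the AHB map.
 */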
7797 pr_info("IB%u:%u Turning LOS on\n", in serdes_7322_los_enable()
7798 ppd->dd->unit, ppd->port); in serdes_7322_los_enable()
7802 ppd->dd->unit, ppd->port); in serdes_7322_los_enable()
7812 if (ppd->dd->cspec->r1) in serdes_7322_init()
7844 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; in serdes_7322_init_old()
7848 le_val = IS_QME(ppd->dd) ? 0 : 1; in serdes_7322_init_old()
7851 /* Clear cmode-override, may be set from older driver */ in serdes_7322_init_old()
7852 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); in serdes_7322_init_old()
7858 /* LoS filter threshold_count on, ch 0-3, set to 8 */ in serdes_7322_init_old()
7859 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); in serdes_7322_init_old()
7860 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); in serdes_7322_init_old()
7861 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); in serdes_7322_init_old()
7862 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); in serdes_7322_init_old()
7864 /* LoS filter threshold_count off, ch 0-3, set to 4 */ in serdes_7322_init_old()
7865 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); in serdes_7322_init_old()
7866 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); in serdes_7322_init_old()
7867 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); in serdes_7322_init_old()
7868 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); in serdes_7322_init_old()
7871 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); in serdes_7322_init_old()
7887 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; in serdes_7322_init_old()
7892 * always on, and QDR is initially enabled; later disabled. in serdes_7322_init_old()
7897 ppd->dd->cspec->r1 ? in serdes_7322_init_old()
7899 ppd->cpspec->qdr_dfe_on = 1; in serdes_7322_init_old()
7907 if (!ppd->dd->cspec->r1) { in serdes_7322_init_old()
7922 int chan, chan_done = (1 << SERDES_CHANS) - 1; in serdes_7322_init_new()
7924 /* Clear cmode-override, may be set from older driver */ in serdes_7322_init_new()
7925 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); in serdes_7322_init_new()
7933 /* Reset - Calibration Setup */ in serdes_7322_init_new()
7962 /* DFE Bandwidth [2:14-12] */ in serdes_7322_init_new()
7967 if (!ppd->dd->cspec->r1) { in serdes_7322_init_new()
7973 /* Baseline Wander Correction Gain [13:4-0] (leave as default) */ in serdes_7322_init_new()
7974 /* Baseline Wander Correction Gain [3:7-5] (leave as default) */ in serdes_7322_init_new()
7975 /* Data Rate Select [5:7-6] (leave as default) */ in serdes_7322_init_new()
7976 /* RX Parallel Word Width [3:10-8] (leave as default) */ in serdes_7322_init_new()
7979 /* Single- or Multi-channel reset */ in serdes_7322_init_new()
7992 /* LoS filter threshold_count on, ch 0-3, set to 8 */ in serdes_7322_init_new()
7993 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); in serdes_7322_init_new()
7994 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); in serdes_7322_init_new()
7995 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); in serdes_7322_init_new()
7996 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); in serdes_7322_init_new()
7998 /* LoS filter threshold_count off, ch 0-3, set to 4 */ in serdes_7322_init_new()
7999 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); in serdes_7322_init_new()
8000 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); in serdes_7322_init_new()
8001 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); in serdes_7322_init_new()
8002 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); in serdes_7322_init_new()
8005 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); in serdes_7322_init_new()
8012 /* Turn on LOS on initial SERDES init */ in serdes_7322_init_new()
8029 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), in serdes_7322_init_new()
8039 IBSD(ppd->hw_pidx), chan_done); in serdes_7322_init_new()
8042 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), in serdes_7322_init_new()
8047 IBSD(ppd->hw_pidx), chan); in serdes_7322_init_new()
8057 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; in serdes_7322_init_new()
8067 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; in serdes_7322_init_new()
8083 * always on, and QDR is initially enabled; later disabled. in serdes_7322_init_new()
8088 ppd->dd->cspec->r1 ? in serdes_7322_init_new()
8090 ppd->cpspec->qdr_dfe_on = 1; in serdes_7322_init_new()
8118 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in set_man_code()
8126 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in set_man_mode_h1()
8129 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in set_man_mode_h1()
8136 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8138 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8140 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8142 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8165 deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, in write_tx_serdes_param()
8168 deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, in write_tx_serdes_param()
8171 deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, in write_tx_serdes_param()
8174 deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, in write_tx_serdes_param()
8181 * Set the parameters for mez cards on link bounce, so they are
8191 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? in adj_tx_serdes()
8192 qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ? in adj_tx_serdes()
8202 ppd->cpspec->qdr_reforce = 0; in force_h1()
8203 if (!ppd->dd->cspec->r1) in force_h1()
8208 set_man_code(ppd, chan, ppd->cpspec->h1_val); in force_h1()
8247 return -1; in qib_r_wait_for_rdy()
8311 #define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8402 struct qib_devdata *dd = ppd->dd; in setup_7322_link_recovery()
8404 if (!ppd->dd->cspec->r1) in setup_7322_link_recovery()
8407 dd->cspec->recovery_ports_initted++; in setup_7322_link_recovery()
8408 ppd->cpspec->recovery_init = 1; in setup_7322_link_recovery()
8410 if (!both && dd->cspec->recovery_ports_initted == 1) { in setup_7322_link_recovery()
8411 portsel = ppd->port == 1 ? portsel_port1 : portsel_port2; in setup_7322_link_recovery()
8435 struct qib_devdata *dd = ppd->dd; in check_7322_rxe_status()
8438 if (dd->cspec->recovery_ports_initted != 1) in check_7322_rxe_status()
8440 qib_write_kreg(dd, kr_control, dd->control | in check_7322_rxe_status()
8451 ppd->dd->cspec->stay_in_freeze = 1; in check_7322_rxe_status()
8452 qib_7322_set_intr_state(ppd->dd, 0); in check_7322_rxe_status()
8458 qib_write_kreg(ppd->dd, kr_hwerrclear, in check_7322_rxe_status()
8462 qib_write_kreg(dd, kr_control, dd->control); in check_7322_rxe_status()
8465 if (ppd->link_speed_supported) { in check_7322_rxe_status()
8466 ppd->cpspec->ibcctrl_a &= in check_7322_rxe_status()
8469 ppd->cpspec->ibcctrl_a); in check_7322_rxe_status()
8471 if (ppd->lflags & QIBL_IB_LINK_DISABLED) in check_7322_rxe_status()