Lines Matching full:dd
162 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ argument
164 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ argument
758 static inline void qib_write_kreg(const struct qib_devdata *dd,
767 static void qib_setup_dca(struct qib_devdata *dd);
768 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
769 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
774 * @dd: device
782 static inline u32 qib_read_ureg32(const struct qib_devdata *dd, in qib_read_ureg32() argument
785 if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) in qib_read_ureg32()
788 (dd->ureg_align * ctxt) + (dd->userbase ? in qib_read_ureg32()
789 (char __iomem *)dd->userbase : in qib_read_ureg32()
790 (char __iomem *)dd->kregbase + dd->uregbase))); in qib_read_ureg32()
795 * @dd: device
802 static inline void qib_write_ureg(const struct qib_devdata *dd, in qib_write_ureg() argument
807 if (dd->userbase) in qib_write_ureg()
809 ((char __iomem *) dd->userbase + in qib_write_ureg()
810 dd->ureg_align * ctxt); in qib_write_ureg()
813 (dd->uregbase + in qib_write_ureg()
814 (char __iomem *) dd->kregbase + in qib_write_ureg()
815 dd->ureg_align * ctxt); in qib_write_ureg()
817 if (dd->kregbase && (dd->flags & QIB_PRESENT)) in qib_write_ureg()
821 static inline u32 qib_read_kreg32(const struct qib_devdata *dd, in qib_read_kreg32() argument
824 if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) in qib_read_kreg32()
826 return readl((u32 __iomem *) &dd->kregbase[regno]); in qib_read_kreg32()
829 static inline u64 qib_read_kreg64(const struct qib_devdata *dd, in qib_read_kreg64() argument
832 if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) in qib_read_kreg64()
834 return readq(&dd->kregbase[regno]); in qib_read_kreg64()
837 static inline void qib_write_kreg(const struct qib_devdata *dd, in qib_write_kreg() argument
840 if (dd->kregbase && (dd->flags & QIB_PRESENT)) in qib_write_kreg()
841 writeq(value, &dd->kregbase[regno]); in qib_write_kreg()
851 if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT)) in qib_read_kreg_port()
859 if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase && in qib_write_kreg_port()
860 (ppd->dd->flags & QIB_PRESENT)) in qib_write_kreg_port()
866 * @dd: the qlogic_ib device
871 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd, in qib_write_kreg_ctxt() argument
875 qib_write_kreg(dd, regno + ctxt, value); in qib_write_kreg_ctxt()
878 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno) in read_7322_creg() argument
880 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) in read_7322_creg()
882 return readq(&dd->cspec->cregbase[regno]); in read_7322_creg()
887 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno) in read_7322_creg32() argument
889 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) in read_7322_creg32()
891 return readl(&dd->cspec->cregbase[regno]); in read_7322_creg32()
900 (ppd->dd->flags & QIB_PRESENT)) in write_7322_creg_port()
908 !(ppd->dd->flags & QIB_PRESENT)) in read_7322_creg_port()
917 !(ppd->dd->flags & QIB_PRESENT)) in read_7322_creg32_port()
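
The accessor hits above (qib_read_ureg32 through read_7322_creg32_port) all share one guard: do nothing unless the register space is mapped (dd->kregbase, dd->cspec->cregbase or a per-port base is non-NULL) and QIB_PRESENT is set in dd->flags, and only then read or write at an offset from that base. Below is a minimal standalone sketch of that guard pattern; struct devdata, read_kreg64 and write_kreg are illustrative stand-ins for the driver's types, a plain array replaces the chip's ioremap'd space, and the dummy return value is an assumption (the real sentinel is not part of these hits).

/*
 * Simplified model of the guarded MMIO accessor pattern shown by the
 * qib_read_kreg / qib_write_kreg / read_7322_creg hits above.  The types
 * and the in-memory "registers" are stand-ins, not the driver's real
 * struct qib_devdata or its mapped register space.
 */
#include <stdint.h>
#include <stdio.h>

#define QIB_PRESENT 0x1u	/* assumption: models the driver's flag bit */

struct devdata {
	volatile uint64_t *kregbase;	/* mapped register space, or NULL */
	unsigned int flags;
};

static uint64_t read_kreg64(const struct devdata *dd, uint16_t regno)
{
	/* same guard as the listed accessors: unmapped or absent -> dummy */
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return dd->kregbase[regno];
}

static void write_kreg(const struct devdata *dd, uint16_t regno, uint64_t value)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		dd->kregbase[regno] = value;
}

int main(void)
{
	uint64_t regs[16] = { 0 };
	struct devdata dd = { .kregbase = regs, .flags = QIB_PRESENT };

	write_kreg(&dd, 3, 0x1234);
	printf("reg 3 = 0x%llx\n", (unsigned long long)read_kreg64(&dd, 3));

	dd.flags = 0;	/* device "gone": accessors become harmless no-ops */
	printf("reg 3 = 0x%llx\n", (unsigned long long)read_kreg64(&dd, 3));
	return 0;
}

The payoff, visible throughout the rest of the listing, is that callers never repeat the presence check themselves; a vanished or unconfigured device simply turns every access into a no-op or dummy read.
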
1319 struct qib_devdata *dd = ppd->dd; in qib_disarm_7322_senderrbufs() local
1322 u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; in qib_disarm_7322_senderrbufs()
1332 sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i); in qib_disarm_7322_senderrbufs()
1335 qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]); in qib_disarm_7322_senderrbufs()
1340 qib_disarm_piobufs_set(dd, sbuf, piobcnt); in qib_disarm_7322_senderrbufs()
1396 struct qib_devdata *dd = ppd->dd; in flush_fifo() local
1427 if (dd->flags & QIB_PIO_FLUSH_WC) { in flush_fifo()
1435 qib_sendbuf_done(dd, bufn); in flush_fifo()
1443 struct qib_devdata *dd = ppd->dd; in qib_7322_sdma_sendctrl() local
1471 spin_lock(&dd->sendctrl_lock); in qib_7322_sdma_sendctrl()
1477 qib_write_kreg(dd, kr_scratch, 0); in qib_7322_sdma_sendctrl()
1489 qib_write_kreg(dd, kr_scratch, 0); in qib_7322_sdma_sendctrl()
1494 qib_write_kreg(dd, kr_scratch, 0); in qib_7322_sdma_sendctrl()
1497 spin_unlock(&dd->sendctrl_lock); in qib_7322_sdma_sendctrl()
1499 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1) in qib_7322_sdma_sendctrl()
1567 struct qib_devdata *dd = ppd->dd; in sdma_7322_p_errors() local
1574 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit, in sdma_7322_p_errors()
1581 qib_dev_porterr(dd, ppd->port, in sdma_7322_p_errors()
1628 static noinline void handle_7322_errors(struct qib_devdata *dd) in handle_7322_errors() argument
1636 errs = qib_read_kreg64(dd, kr_errstatus); in handle_7322_errors()
1638 qib_devinfo(dd->pcidev, in handle_7322_errors()
1644 errs &= dd->cspec->errormask; in handle_7322_errors()
1645 msg = dd->cspec->emsgbuf; in handle_7322_errors()
1650 qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); in handle_7322_errors()
1654 qib_disarm_7322_senderrbufs(dd->pport); in handle_7322_errors()
1660 qib_disarm_7322_senderrbufs(dd->pport); in handle_7322_errors()
1662 qib_write_kreg(dd, kr_errclear, errs); in handle_7322_errors()
1672 err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask, in handle_7322_errors()
1682 qib_dev_err(dd, in handle_7322_errors()
1684 dd->flags &= ~QIB_INITTED; /* needs re-init */ in handle_7322_errors()
1686 *dd->devstatusp |= QIB_STATUS_HWERROR; in handle_7322_errors()
1687 for (pidx = 0; pidx < dd->num_pports; ++pidx) in handle_7322_errors()
1688 if (dd->pport[pidx].link_speed_supported) in handle_7322_errors()
1689 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF; in handle_7322_errors()
1693 qib_dev_err(dd, "%s error\n", msg); in handle_7322_errors()
1703 qib_handle_urcv(dd, ~0U); in handle_7322_errors()
1716 struct qib_devdata *dd = from_tasklet(dd, t, error_tasklet); in qib_error_tasklet() local
1718 handle_7322_errors(dd); in qib_error_tasklet()
1719 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); in qib_error_tasklet()
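
The error-path hits for handle_7322_errors and qib_error_tasklet show a second recurring idiom: read the latched status (kr_errstatus), AND it with the software shadow dd->cspec->errormask, acknowledge by writing the handled bits back to kr_errclear, then restore kr_errmask from the shadow. A self-contained model of that write-one-to-clear plus mask-shadow idiom follows; the register layout, the bit values and the names fake_chip / handle_errors are invented for illustration only.

/*
 * Standalone model of the error handling visible in the
 * handle_7322_errors() and qib_error_tasklet() hits: hardware latches
 * error bits, software keeps a mask shadow, and writing a 1 to the
 * "clear" register acknowledges that bit.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_chip {
	uint64_t errstatus;	/* latched error bits, write-1-to-clear */
	uint64_t errmask;	/* which sources may interrupt the host */
};

struct fake_dev {
	struct fake_chip *chip;
	uint64_t errormask_shadow;	/* software copy, like dd->cspec->errormask */
};

static void write_errclear(struct fake_chip *chip, uint64_t bits)
{
	chip->errstatus &= ~bits;	/* W1C: writing a 1 clears that bit */
}

static void handle_errors(struct fake_dev *dd)
{
	uint64_t errs = dd->chip->errstatus;

	errs &= dd->errormask_shadow;	/* ignore masked-off sources */
	if (!errs)
		return;
	printf("handling error bits 0x%llx\n", (unsigned long long)errs);
	write_errclear(dd->chip, errs);	/* ack exactly what was observed */
	dd->chip->errmask = dd->errormask_shadow;	/* re-arm from the shadow */
}

int main(void)
{
	struct fake_chip chip = { .errstatus = 0x5, .errmask = 0 };
	struct fake_dev dd = { .chip = &chip, .errormask_shadow = 0x4 };

	handle_errors(&dd);	/* bit 2 is handled; bit 0 stays latched */
	printf("status now 0x%llx\n", (unsigned long long)chip.errstatus);
	return 0;
}

Clearing only the bits that were actually observed leaves any error that latches between the read and the clear pending for the next pass.
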
1782 if (!ppd->dd->cspec->r1) in handle_serdes_issues()
1791 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) && in handle_serdes_issues()
1803 if (!ppd->dd->cspec->r1 && in handle_serdes_issues()
1818 ppd->dd->cspec->r1 ? in handle_serdes_issues()
1823 ppd->dd->unit, ppd->port, ibclt); in handle_serdes_issues()
1839 struct qib_devdata *dd = ppd->dd; in handle_7322_p_errors() local
1842 fmask = qib_read_kreg64(dd, kr_act_fmask); in handle_7322_p_errors()
1848 qib_devinfo(dd->pcidev, in handle_7322_p_errors()
1865 qib_dev_porterr(dd, ppd->port, in handle_7322_p_errors()
1982 qib_dev_porterr(dd, ppd->port, "%s error\n", msg); in handle_7322_p_errors()
1991 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable) in qib_7322_set_intr_state() argument
1994 if (dd->flags & QIB_BADINTR) in qib_7322_set_intr_state()
1996 qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask); in qib_7322_set_intr_state()
1998 qib_write_kreg(dd, kr_intclear, 0ULL); in qib_7322_set_intr_state()
1999 if (dd->cspec->num_msix_entries) { in qib_7322_set_intr_state()
2001 u64 val = qib_read_kreg64(dd, kr_intgranted); in qib_7322_set_intr_state()
2004 qib_write_kreg(dd, kr_intgranted, val); in qib_7322_set_intr_state()
2007 qib_write_kreg(dd, kr_intmask, 0ULL); in qib_7322_set_intr_state()
2025 static void qib_7322_clear_freeze(struct qib_devdata *dd) in qib_7322_clear_freeze() argument
2030 qib_write_kreg(dd, kr_errmask, 0ULL); in qib_7322_clear_freeze()
2032 for (pidx = 0; pidx < dd->num_pports; ++pidx) in qib_7322_clear_freeze()
2033 if (dd->pport[pidx].link_speed_supported) in qib_7322_clear_freeze()
2034 qib_write_kreg_port(dd->pport + pidx, krp_errmask, in qib_7322_clear_freeze()
2038 qib_7322_set_intr_state(dd, 0); in qib_7322_clear_freeze()
2041 qib_write_kreg(dd, kr_control, dd->control); in qib_7322_clear_freeze()
2042 qib_read_kreg32(dd, kr_scratch); in qib_7322_clear_freeze()
2050 qib_write_kreg(dd, kr_hwerrclear, 0ULL); in qib_7322_clear_freeze()
2051 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE); in qib_7322_clear_freeze()
2052 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); in qib_7322_clear_freeze()
2054 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_7322_clear_freeze()
2055 if (!dd->pport[pidx].link_speed_supported) in qib_7322_clear_freeze()
2057 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull); in qib_7322_clear_freeze()
2058 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull); in qib_7322_clear_freeze()
2060 qib_7322_set_intr_state(dd, 1); in qib_7322_clear_freeze()
2066 * @dd: the qlogic_ib device
2075 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, in qib_7322_handle_hwerrors() argument
2082 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus); in qib_7322_handle_hwerrors()
2086 qib_dev_err(dd, in qib_7322_handle_hwerrors()
2093 qib_write_kreg(dd, kr_hwerrclear, hwerrs & in qib_7322_handle_hwerrors()
2096 hwerrs &= dd->cspec->hwerrmask; in qib_7322_handle_hwerrors()
2101 qib_devinfo(dd->pcidev, in qib_7322_handle_hwerrors()
2105 ctrl = qib_read_kreg32(dd, kr_control); in qib_7322_handle_hwerrors()
2106 if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) { in qib_7322_handle_hwerrors()
2111 dd->cspec->stay_in_freeze) { in qib_7322_handle_hwerrors()
2119 if (dd->flags & QIB_INITTED) in qib_7322_handle_hwerrors()
2122 qib_7322_clear_freeze(dd); in qib_7322_handle_hwerrors()
2131 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); in qib_7322_handle_hwerrors()
2132 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); in qib_7322_handle_hwerrors()
2139 qib_dev_err(dd, "%s hardware error\n", msg); in qib_7322_handle_hwerrors()
2147 struct qib_pportdata *ppd = dd->pport; in qib_7322_handle_hwerrors()
2149 for (; pidx < dd->num_pports; ++pidx, ppd++) { in qib_7322_handle_hwerrors()
2165 if (isfatal && !dd->diag_client) { in qib_7322_handle_hwerrors()
2166 qib_dev_err(dd, in qib_7322_handle_hwerrors()
2168 dd->serial); in qib_7322_handle_hwerrors()
2173 if (dd->freezemsg) in qib_7322_handle_hwerrors()
2174 snprintf(dd->freezemsg, dd->freezelen, in qib_7322_handle_hwerrors()
2176 qib_disable_after_error(dd); in qib_7322_handle_hwerrors()
2183 * @dd: the qlogic_ib device
2191 static void qib_7322_init_hwerrors(struct qib_devdata *dd) in qib_7322_init_hwerrors() argument
2196 extsval = qib_read_kreg64(dd, kr_extstatus); in qib_7322_init_hwerrors()
2199 qib_dev_err(dd, "MemBIST did not complete!\n"); in qib_7322_init_hwerrors()
2202 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed)); in qib_7322_init_hwerrors()
2203 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); in qib_7322_init_hwerrors()
2206 qib_write_kreg(dd, kr_errclear, ~0ULL); in qib_7322_init_hwerrors()
2208 qib_write_kreg(dd, kr_errmask, ~0ULL); in qib_7322_init_hwerrors()
2209 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask); in qib_7322_init_hwerrors()
2210 for (pidx = 0; pidx < dd->num_pports; ++pidx) in qib_7322_init_hwerrors()
2211 if (dd->pport[pidx].link_speed_supported) in qib_7322_init_hwerrors()
2212 qib_write_kreg_port(dd->pport + pidx, krp_errmask, in qib_7322_init_hwerrors()
2222 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable) in qib_set_7322_armlaunch() argument
2225 qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH); in qib_set_7322_armlaunch()
2226 dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH; in qib_set_7322_armlaunch()
2228 dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH; in qib_set_7322_armlaunch()
2229 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); in qib_set_7322_armlaunch()
2241 struct qib_devdata *dd = ppd->dd; in qib_set_ib_7322_lstate() local
2279 qib_write_kreg(dd, kr_scratch, 0); in qib_set_ib_7322_lstate()
2292 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports)) argument
2297 struct qib_devdata *dd = ppd->dd; in set_vls() local
2309 totcred = NUM_RCV_BUF_UNITS(dd); in set_vls()
2325 qib_write_kreg(dd, kr_scratch, 0ULL); in set_vls()
2338 qib_write_kreg(dd, kr_scratch, 0ULL); in set_vls()
2353 struct qib_devdata *dd = ppd->dd; in qib_7322_bringup_serdes() local
2365 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_bringup_serdes()
2457 if (dd->base_guid) in qib_7322_bringup_serdes()
2458 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1; in qib_7322_bringup_serdes()
2464 qib_write_kreg(dd, kr_scratch, 0); in qib_7322_bringup_serdes()
2474 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_bringup_serdes()
2479 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); in qib_7322_bringup_serdes()
2482 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); in qib_7322_bringup_serdes()
2510 if (ppd->dd->cspec->r1) in qib_7322_mini_quiet_serdes()
2533 struct qib_devdata *dd = ppd->dd; in qib_7322_mini_quiet_serdes() local
2537 diagc = qib_read_kreg64(dd, kr_hwdiagctrl); in qib_7322_mini_quiet_serdes()
2538 qib_write_kreg(dd, kr_hwdiagctrl, in qib_7322_mini_quiet_serdes()
2566 qib_write_kreg(dd, kr_hwdiagctrl, diagc); in qib_7322_mini_quiet_serdes()
2594 struct qib_devdata *dd = ppd->dd; in qib_setup_7322_setextled() local
2603 if (dd->diag_client) in qib_setup_7322_setextled()
2620 spin_lock_irqsave(&dd->cspec->gpio_lock, flags); in qib_setup_7322_setextled()
2621 extctl = dd->cspec->extctrl & (ppd->port == 1 ? in qib_setup_7322_setextled()
2635 dd->cspec->extctrl = extctl; in qib_setup_7322_setextled()
2636 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); in qib_setup_7322_setextled()
2637 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); in qib_setup_7322_setextled()
2645 static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event) in qib_7322_notify_dca() argument
2649 if (dd->flags & QIB_DCA_ENABLED) in qib_7322_notify_dca()
2651 if (!dca_add_requester(&dd->pcidev->dev)) { in qib_7322_notify_dca()
2652 qib_devinfo(dd->pcidev, "DCA enabled\n"); in qib_7322_notify_dca()
2653 dd->flags |= QIB_DCA_ENABLED; in qib_7322_notify_dca()
2654 qib_setup_dca(dd); in qib_7322_notify_dca()
2658 if (dd->flags & QIB_DCA_ENABLED) { in qib_7322_notify_dca()
2659 dca_remove_requester(&dd->pcidev->dev); in qib_7322_notify_dca()
2660 dd->flags &= ~QIB_DCA_ENABLED; in qib_7322_notify_dca()
2661 dd->cspec->dca_ctrl = 0; in qib_7322_notify_dca()
2662 qib_write_kreg(dd, KREG_IDX(DCACtrlA), in qib_7322_notify_dca()
2663 dd->cspec->dca_ctrl); in qib_7322_notify_dca()
2672 struct qib_devdata *dd = rcd->dd; in qib_update_rhdrq_dca() local
2673 struct qib_chip_specific *cspec = dd->cspec; in qib_update_rhdrq_dca()
2675 if (!(dd->flags & QIB_DCA_ENABLED)) in qib_update_rhdrq_dca()
2684 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb; in qib_update_rhdrq_dca()
2685 qib_devinfo(dd->pcidev, in qib_update_rhdrq_dca()
2688 qib_write_kreg(dd, rmp->regno, in qib_update_rhdrq_dca()
2691 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); in qib_update_rhdrq_dca()
2697 struct qib_devdata *dd = ppd->dd; in qib_update_sdma_dca() local
2698 struct qib_chip_specific *cspec = dd->cspec; in qib_update_sdma_dca()
2701 if (!(dd->flags & QIB_DCA_ENABLED)) in qib_update_sdma_dca()
2709 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << in qib_update_sdma_dca()
2713 qib_devinfo(dd->pcidev, in qib_update_sdma_dca()
2716 qib_write_kreg(dd, KREG_IDX(DCACtrlF), in qib_update_sdma_dca()
2721 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); in qib_update_sdma_dca()
2725 static void qib_setup_dca(struct qib_devdata *dd) in qib_setup_dca() argument
2727 struct qib_chip_specific *cspec = dd->cspec; in qib_setup_dca()
2758 qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i, in qib_setup_dca()
2761 setup_dca_notifier(dd, i); in qib_setup_dca()
2786 struct qib_devdata *dd; in qib_irq_notifier_release() local
2791 dd = rcd->dd; in qib_irq_notifier_release()
2795 dd = ppd->dd; in qib_irq_notifier_release()
2797 qib_devinfo(dd->pcidev, in qib_irq_notifier_release()
2803 static void qib_7322_free_irq(struct qib_devdata *dd) in qib_7322_free_irq() argument
2808 dd->cspec->main_int_mask = ~0ULL; in qib_7322_free_irq()
2810 for (i = 0; i < dd->cspec->num_msix_entries; i++) { in qib_7322_free_irq()
2812 if (dd->cspec->msix_entries[i].arg) { in qib_7322_free_irq()
2814 reset_dca_notifier(dd, i); in qib_7322_free_irq()
2816 irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i), in qib_7322_free_irq()
2818 free_cpumask_var(dd->cspec->msix_entries[i].mask); in qib_7322_free_irq()
2819 pci_free_irq(dd->pcidev, i, in qib_7322_free_irq()
2820 dd->cspec->msix_entries[i].arg); in qib_7322_free_irq()
2825 if (!dd->cspec->num_msix_entries) in qib_7322_free_irq()
2826 pci_free_irq(dd->pcidev, 0, dd); in qib_7322_free_irq()
2828 dd->cspec->num_msix_entries = 0; in qib_7322_free_irq()
2830 pci_free_irq_vectors(dd->pcidev); in qib_7322_free_irq()
2833 intgranted = qib_read_kreg64(dd, kr_intgranted); in qib_7322_free_irq()
2835 qib_write_kreg(dd, kr_intgranted, intgranted); in qib_7322_free_irq()
2838 static void qib_setup_7322_cleanup(struct qib_devdata *dd) in qib_setup_7322_cleanup() argument
2843 if (dd->flags & QIB_DCA_ENABLED) { in qib_setup_7322_cleanup()
2844 dca_remove_requester(&dd->pcidev->dev); in qib_setup_7322_cleanup()
2845 dd->flags &= ~QIB_DCA_ENABLED; in qib_setup_7322_cleanup()
2846 dd->cspec->dca_ctrl = 0; in qib_setup_7322_cleanup()
2847 qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl); in qib_setup_7322_cleanup()
2851 qib_7322_free_irq(dd); in qib_setup_7322_cleanup()
2852 kfree(dd->cspec->cntrs); in qib_setup_7322_cleanup()
2853 bitmap_free(dd->cspec->sendchkenable); in qib_setup_7322_cleanup()
2854 bitmap_free(dd->cspec->sendgrhchk); in qib_setup_7322_cleanup()
2855 bitmap_free(dd->cspec->sendibchk); in qib_setup_7322_cleanup()
2856 kfree(dd->cspec->msix_entries); in qib_setup_7322_cleanup()
2857 for (i = 0; i < dd->num_pports; i++) { in qib_setup_7322_cleanup()
2862 kfree(dd->pport[i].cpspec->portcntrs); in qib_setup_7322_cleanup()
2863 if (dd->flags & QIB_HAS_QSFP) { in qib_setup_7322_cleanup()
2864 spin_lock_irqsave(&dd->cspec->gpio_lock, flags); in qib_setup_7322_cleanup()
2865 dd->cspec->gpio_mask &= ~mask; in qib_setup_7322_cleanup()
2866 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); in qib_setup_7322_cleanup()
2867 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); in qib_setup_7322_cleanup()
2873 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat) in sdma_7322_intr() argument
2875 struct qib_pportdata *ppd0 = &dd->pport[0]; in sdma_7322_intr()
2876 struct qib_pportdata *ppd1 = &dd->pport[1]; in sdma_7322_intr()
2896 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint) in qib_wantpiobuf_7322_intr() argument
2900 spin_lock_irqsave(&dd->sendctrl_lock, flags); in qib_wantpiobuf_7322_intr()
2902 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); in qib_wantpiobuf_7322_intr()
2904 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); in qib_wantpiobuf_7322_intr()
2905 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); in qib_wantpiobuf_7322_intr()
2906 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_wantpiobuf_7322_intr()
2907 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_wantpiobuf_7322_intr()
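
qib_wantpiobuf_7322_intr, like sendctrl_7322_mod further down, never read-modify-writes SendCtrl on the chip: the bit is flipped in the software copy dd->sendctrl under dd->sendctrl_lock, the whole copy is written out, and a write to kr_scratch flushes it. Below is a standalone sketch of that shadow-register-under-lock idiom, with a pthread mutex standing in for the kernel spinlock and an invented bit position.

/*
 * Model of the sendctrl shadow pattern from qib_wantpiobuf_7322_intr():
 * flip a flag in a software copy under a lock, write the full copy to
 * the device register, then do a dummy scratch write to flush it.  The
 * lock, register block and bit value are stand-ins.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define SEND_INT_BUF_AVAIL (1ULL << 3)	/* invented bit position */

struct fake_dev {
	pthread_mutex_t sendctrl_lock;
	uint64_t sendctrl_shadow;	/* like dd->sendctrl */
	volatile uint64_t kr_sendctrl;	/* "hardware" register */
	volatile uint64_t kr_scratch;	/* written to flush posted writes */
};

static void wantpiobuf_intr(struct fake_dev *dd, int needint)
{
	pthread_mutex_lock(&dd->sendctrl_lock);
	if (needint)
		dd->sendctrl_shadow |= SEND_INT_BUF_AVAIL;
	else
		dd->sendctrl_shadow &= ~SEND_INT_BUF_AVAIL;
	dd->kr_sendctrl = dd->sendctrl_shadow;	/* write the whole shadow */
	dd->kr_scratch = 0;			/* flush, as the listed hits do */
	pthread_mutex_unlock(&dd->sendctrl_lock);
}

int main(void)
{
	struct fake_dev dd = { .sendctrl_lock = PTHREAD_MUTEX_INITIALIZER };

	wantpiobuf_intr(&dd, 1);
	printf("sendctrl = 0x%llx\n", (unsigned long long)dd.kr_sendctrl);
	wantpiobuf_intr(&dd, 0);
	printf("sendctrl = 0x%llx\n", (unsigned long long)dd.kr_sendctrl);
	return 0;
}

Because the shadow is always written in full, the cached dd->sendctrl and dd->control values stay authoritative, which is presumably why the freeze and reset hits elsewhere in this listing rewrite kr_control and kr_sendctrl from those shadows instead of reading the chip back.
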
2915 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat) in unknown_7322_ibits() argument
2921 qib_dev_err(dd, in unknown_7322_ibits()
2924 qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills)); in unknown_7322_ibits()
2928 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) in unknown_7322_gpio_intr() argument
2941 gpiostatus = qib_read_kreg32(dd, kr_gpio_status); in unknown_7322_gpio_intr()
2949 qib_write_kreg(dd, kr_gpio_clear, gpiostatus); in unknown_7322_gpio_intr()
2954 for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP); in unknown_7322_gpio_intr()
2960 if (!dd->pport[pidx].link_speed_supported) in unknown_7322_gpio_intr()
2963 ppd = dd->pport + pidx; in unknown_7322_gpio_intr()
2965 if (gpiostatus & dd->cspec->gpio_mask & mask) { in unknown_7322_gpio_intr()
2970 pins = qib_read_kreg64(dd, kr_extstatus); in unknown_7322_gpio_intr()
2981 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask); in unknown_7322_gpio_intr()
2987 dd->cspec->gpio_mask &= ~gpio_irq; in unknown_7322_gpio_intr()
2988 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); in unknown_7322_gpio_intr()
2996 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat) in unlikely_7322_intr() argument
2999 unknown_7322_ibits(dd, istat); in unlikely_7322_intr()
3001 unknown_7322_gpio_intr(dd); in unlikely_7322_intr()
3003 qib_write_kreg(dd, kr_errmask, 0ULL); in unlikely_7322_intr()
3004 tasklet_schedule(&dd->error_tasklet); in unlikely_7322_intr()
3006 if (istat & INT_MASK_P(Err, 0) && dd->rcd[0]) in unlikely_7322_intr()
3007 handle_7322_p_errors(dd->rcd[0]->ppd); in unlikely_7322_intr()
3008 if (istat & INT_MASK_P(Err, 1) && dd->rcd[1]) in unlikely_7322_intr()
3009 handle_7322_p_errors(dd->rcd[1]->ppd); in unlikely_7322_intr()
3018 struct qib_devdata *dd = rcd->dd; in adjust_rcv_timeout() local
3019 u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt]; in adjust_rcv_timeout()
3032 dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout; in adjust_rcv_timeout()
3033 qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout); in adjust_rcv_timeout()
3046 struct qib_devdata *dd = data; in qib_7322intr() local
3054 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) { in qib_7322intr()
3065 istat = qib_read_kreg64(dd, kr_intstatus); in qib_7322intr()
3068 qib_bad_intrstatus(dd); in qib_7322intr()
3069 qib_dev_err(dd, "Interrupt status all f's, skipping\n"); in qib_7322intr()
3075 istat &= dd->cspec->main_int_mask; in qib_7322intr()
3082 this_cpu_inc(*dd->int_counter); in qib_7322intr()
3088 unlikely_7322_intr(dd, istat); in qib_7322intr()
3096 qib_write_kreg(dd, kr_intclear, istat); in qib_7322intr()
3107 for (i = 0; i < dd->first_user_ctxt; i++) { in qib_7322intr()
3110 if (dd->rcd[i]) in qib_7322intr()
3111 qib_kreceive(dd->rcd[i], NULL, &npkts); in qib_7322intr()
3118 qib_handle_urcv(dd, ctxtrbits); in qib_7322intr()
3123 sdma_7322_intr(dd, istat); in qib_7322intr()
3125 if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED)) in qib_7322intr()
3126 qib_ib_piobufavail(dd); in qib_7322intr()
3139 struct qib_devdata *dd = rcd->dd; in qib_7322pintr() local
3142 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in qib_7322pintr()
3151 this_cpu_inc(*dd->int_counter); in qib_7322pintr()
3154 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | in qib_7322pintr()
3167 struct qib_devdata *dd = data; in qib_7322bufavail() local
3169 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in qib_7322bufavail()
3178 this_cpu_inc(*dd->int_counter); in qib_7322bufavail()
3181 qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL); in qib_7322bufavail()
3184 if (dd->flags & QIB_INITTED) in qib_7322bufavail()
3185 qib_ib_piobufavail(dd); in qib_7322bufavail()
3187 qib_wantpiobuf_7322_intr(dd, 0); in qib_7322bufavail()
3198 struct qib_devdata *dd = ppd->dd; in sdma_intr() local
3200 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in sdma_intr()
3209 this_cpu_inc(*dd->int_counter); in sdma_intr()
3212 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? in sdma_intr()
3225 struct qib_devdata *dd = ppd->dd; in sdma_idle_intr() local
3227 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in sdma_idle_intr()
3236 this_cpu_inc(*dd->int_counter); in sdma_idle_intr()
3239 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? in sdma_idle_intr()
3252 struct qib_devdata *dd = ppd->dd; in sdma_progress_intr() local
3254 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in sdma_progress_intr()
3263 this_cpu_inc(*dd->int_counter); in sdma_progress_intr()
3266 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? in sdma_progress_intr()
3280 struct qib_devdata *dd = ppd->dd; in sdma_cleanup_intr() local
3282 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) in sdma_cleanup_intr()
3291 this_cpu_inc(*dd->int_counter); in sdma_cleanup_intr()
3294 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? in sdma_cleanup_intr()
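
Every per-context and SDMA MSI-X handler in this stretch (qib_7322pintr, qib_7322pintr's buddies qib_7322bufavail, sdma_intr, sdma_idle_intr, sdma_progress_intr and sdma_cleanup_intr) opens the same way: return unless dd->flags has QIB_PRESENT set and QIB_BADINTR clear, bump the per-CPU interrupt counter, and acknowledge only its own bit in kr_intclear. A condensed standalone model of that preamble follows; the flag values, the IRQ return enum and the bit passed in are simplified placeholders, not the driver's real definitions.

/*
 * Model of the common preamble shared by the per-context and SDMA
 * MSI-X handlers listed above: bail out unless the device is PRESENT
 * and not BADINTR, count the interrupt, then ack this handler's own
 * bit in the interrupt-clear register.
 */
#include <stdint.h>
#include <stdio.h>

#define QIB_PRESENT 0x1u
#define QIB_BADINTR 0x2u

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

struct fake_dev {
	unsigned int flags;
	unsigned long long int_counter;	/* stands in for *dd->int_counter */
	volatile uint64_t kr_intclear;
};

static enum irqreturn bufavail_intr(struct fake_dev *dd, uint64_t my_bit)
{
	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		return IRQ_HANDLED;	/* device gone or flagged bad: just ack */

	dd->int_counter++;		/* this_cpu_inc() in the driver */
	dd->kr_intclear = my_bit;	/* clear only this handler's source */

	/* ...the real handler work (e.g. buffer-available wakeup) goes here... */
	return IRQ_HANDLED;
}

int main(void)
{
	struct fake_dev dd = { .flags = QIB_PRESENT };

	bufavail_intr(&dd, 1ULL << 29);
	printf("ints=%llu intclear=0x%llx\n", dd.int_counter,
	       (unsigned long long)dd.kr_intclear);
	return 0;
}
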
3304 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum) in reset_dca_notifier() argument
3306 if (!dd->cspec->msix_entries[msixnum].dca) in reset_dca_notifier()
3309 qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n", in reset_dca_notifier()
3310 dd->unit, pci_irq_vector(dd->pcidev, msixnum)); in reset_dca_notifier()
3311 irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL); in reset_dca_notifier()
3312 dd->cspec->msix_entries[msixnum].notifier = NULL; in reset_dca_notifier()
3315 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum) in setup_dca_notifier() argument
3317 struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum]; in setup_dca_notifier()
3327 n->notify.irq = pci_irq_vector(dd->pcidev, msixnum); in setup_dca_notifier()
3332 qib_devinfo(dd->pcidev, in setup_dca_notifier()
3355 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend) in qib_setup_7322_interrupt() argument
3363 if (!dd->num_pports) in qib_setup_7322_interrupt()
3372 qib_7322_set_intr_state(dd, 0); in qib_setup_7322_interrupt()
3375 qib_7322_init_hwerrors(dd); in qib_setup_7322_interrupt()
3378 qib_write_kreg(dd, kr_intclear, ~0ULL); in qib_setup_7322_interrupt()
3381 qib_write_kreg(dd, kr_intgranted, ~0ULL); in qib_setup_7322_interrupt()
3382 qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL); in qib_setup_7322_interrupt()
3385 if (!dd->cspec->num_msix_entries) { in qib_setup_7322_interrupt()
3388 ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd, in qib_setup_7322_interrupt()
3392 dd, in qib_setup_7322_interrupt()
3394 pci_irq_vector(dd->pcidev, 0), ret); in qib_setup_7322_interrupt()
3397 dd->cspec->main_int_mask = ~0ULL; in qib_setup_7322_interrupt()
3405 local_mask = cpumask_of_pcibus(dd->pcidev->bus); in qib_setup_7322_interrupt()
3418 for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) { in qib_setup_7322_interrupt()
3428 if (irq_table[i].port > dd->num_pports) in qib_setup_7322_interrupt()
3430 arg = dd->pport + irq_table[i].port - 1; in qib_setup_7322_interrupt()
3432 arg = dd; in qib_setup_7322_interrupt()
3438 ret = pci_request_irq(dd->pcidev, msixnum, handler, in qib_setup_7322_interrupt()
3440 dd->unit, in qib_setup_7322_interrupt()
3447 arg = dd->rcd[ctxt]; in qib_setup_7322_interrupt()
3457 ret = pci_request_irq(dd->pcidev, msixnum, handler, in qib_setup_7322_interrupt()
3460 dd->unit); in qib_setup_7322_interrupt()
3468 qib_dev_err(dd, in qib_setup_7322_interrupt()
3471 pci_irq_vector(dd->pcidev, msixnum), in qib_setup_7322_interrupt()
3473 qib_7322_free_irq(dd); in qib_setup_7322_interrupt()
3474 pci_alloc_irq_vectors(dd->pcidev, 1, 1, in qib_setup_7322_interrupt()
3478 dd->cspec->msix_entries[msixnum].arg = arg; in qib_setup_7322_interrupt()
3480 dd->cspec->msix_entries[msixnum].dca = dca; in qib_setup_7322_interrupt()
3481 dd->cspec->msix_entries[msixnum].rcv = in qib_setup_7322_interrupt()
3491 qib_read_kreg64(dd, 2 * msixnum + 1 + in qib_setup_7322_interrupt()
3495 &dd->cspec->msix_entries[msixnum].mask, in qib_setup_7322_interrupt()
3499 dd->cspec->msix_entries[msixnum].mask); in qib_setup_7322_interrupt()
3506 dd->cspec->msix_entries[msixnum].mask); in qib_setup_7322_interrupt()
3509 pci_irq_vector(dd->pcidev, msixnum), in qib_setup_7322_interrupt()
3510 dd->cspec->msix_entries[msixnum].mask); in qib_setup_7322_interrupt()
3516 qib_write_kreg(dd, kr_intredirect + i, redirect[i]); in qib_setup_7322_interrupt()
3517 dd->cspec->main_int_mask = mask; in qib_setup_7322_interrupt()
3518 tasklet_setup(&dd->error_tasklet, qib_error_tasklet); in qib_setup_7322_interrupt()
3523 * @dd: the qlogic_ib device
3527 static unsigned qib_7322_boardname(struct qib_devdata *dd) in qib_7322_boardname() argument
3533 boardid = SYM_FIELD(dd->revision, Revision, BoardID); in qib_7322_boardname()
3537 dd->boardname = "InfiniPath_QLE7342_Emulation"; in qib_7322_boardname()
3540 dd->boardname = "InfiniPath_QLE7340"; in qib_7322_boardname()
3541 dd->flags |= QIB_HAS_QSFP; in qib_7322_boardname()
3545 dd->boardname = "InfiniPath_QLE7342"; in qib_7322_boardname()
3546 dd->flags |= QIB_HAS_QSFP; in qib_7322_boardname()
3549 dd->boardname = "InfiniPath_QMI7342"; in qib_7322_boardname()
3552 dd->boardname = "InfiniPath_Unsupported7342"; in qib_7322_boardname()
3553 qib_dev_err(dd, "Unsupported version of QMH7342\n"); in qib_7322_boardname()
3557 dd->boardname = "InfiniPath_QMH7342"; in qib_7322_boardname()
3561 dd->boardname = "InfiniPath_QME7342"; in qib_7322_boardname()
3564 dd->boardname = "InfiniPath_QME7362"; in qib_7322_boardname()
3565 dd->flags |= QIB_HAS_QSFP; in qib_7322_boardname()
3568 dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr"; in qib_7322_boardname()
3569 dd->flags |= QIB_HAS_QSFP; in qib_7322_boardname()
3572 dd->boardname = "InfiniPath_QLE7342_TEST"; in qib_7322_boardname()
3573 dd->flags |= QIB_HAS_QSFP; in qib_7322_boardname()
3576 dd->boardname = "InfiniPath_QLE73xy_UNKNOWN"; in qib_7322_boardname()
3577 qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid); in qib_7322_boardname()
3580 dd->board_atten = 1; /* index into txdds_Xdr */ in qib_7322_boardname()
3582 snprintf(dd->boardversion, sizeof(dd->boardversion), in qib_7322_boardname()
3584 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, in qib_7322_boardname()
3585 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch), in qib_7322_boardname()
3586 dd->majrev, dd->minrev, in qib_7322_boardname()
3587 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW)); in qib_7322_boardname()
3590 qib_devinfo(dd->pcidev, in qib_7322_boardname()
3592 dd->unit); in qib_7322_boardname()
3603 static int qib_do_7322_reset(struct qib_devdata *dd) in qib_do_7322_reset() argument
3613 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit); in qib_do_7322_reset()
3615 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz); in qib_do_7322_reset()
3617 msix_entries = dd->cspec->num_msix_entries; in qib_do_7322_reset()
3620 qib_7322_set_intr_state(dd, 0); in qib_do_7322_reset()
3622 qib_7322_free_irq(dd); in qib_do_7322_reset()
3626 msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries, in qib_do_7322_reset()
3641 vecaddr = qib_read_kreg64(dd, 2 * i + in qib_do_7322_reset()
3643 vecdata = qib_read_kreg64(dd, 1 + 2 * i + in qib_do_7322_reset()
3652 dd->pport->cpspec->ibdeltainprog = 0; in qib_do_7322_reset()
3653 dd->pport->cpspec->ibsymdelta = 0; in qib_do_7322_reset()
3654 dd->pport->cpspec->iblnkerrdelta = 0; in qib_do_7322_reset()
3655 dd->pport->cpspec->ibmalfdelta = 0; in qib_do_7322_reset()
3657 dd->z_int_counter = qib_int_counter(dd); in qib_do_7322_reset()
3664 dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR); in qib_do_7322_reset()
3665 dd->flags |= QIB_DOING_RESET; in qib_do_7322_reset()
3666 val = dd->control | QLOGIC_IB_C_RESET; in qib_do_7322_reset()
3667 writeq(val, &dd->kregbase[kr_control]); in qib_do_7322_reset()
3677 qib_pcie_reenable(dd, cmdval, int_line, clinesz); in qib_do_7322_reset()
3683 val = readq(&dd->kregbase[kr_revision]); in qib_do_7322_reset()
3684 if (val == dd->revision) in qib_do_7322_reset()
3687 qib_dev_err(dd, in qib_do_7322_reset()
3694 dd->flags |= QIB_PRESENT; /* it's back */ in qib_do_7322_reset()
3701 qib_write_kreg(dd, 2 * i + in qib_do_7322_reset()
3704 qib_write_kreg(dd, 1 + 2 * i + in qib_do_7322_reset()
3711 for (i = 0; i < dd->num_pports; ++i) in qib_do_7322_reset()
3712 write_7322_init_portregs(&dd->pport[i]); in qib_do_7322_reset()
3713 write_7322_initregs(dd); in qib_do_7322_reset()
3715 if (qib_pcie_params(dd, dd->lbus_width, &msix_entries)) in qib_do_7322_reset()
3716 qib_dev_err(dd, in qib_do_7322_reset()
3719 dd->cspec->num_msix_entries = msix_entries; in qib_do_7322_reset()
3720 qib_setup_7322_interrupt(dd, 1); in qib_do_7322_reset()
3722 for (i = 0; i < dd->num_pports; ++i) { in qib_do_7322_reset()
3723 struct qib_pportdata *ppd = &dd->pport[i]; in qib_do_7322_reset()
3732 dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */ in qib_do_7322_reset()
3739 * @dd: the qlogic_ib device
3744 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, in qib_7322_put_tid() argument
3747 if (!(dd->flags & QIB_PRESENT)) in qib_7322_put_tid()
3749 if (pa != dd->tidinvalid) { in qib_7322_put_tid()
3754 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n", in qib_7322_put_tid()
3759 qib_dev_err(dd, in qib_7322_put_tid()
3766 chippa |= dd->tidtemplate; in qib_7322_put_tid()
3776 * @dd: the qlogic_ib device
3782 static void qib_7322_clear_tids(struct qib_devdata *dd, in qib_7322_clear_tids() argument
3790 if (!dd->kregbase || !rcd) in qib_7322_clear_tids()
3795 tidinv = dd->tidinvalid; in qib_7322_clear_tids()
3797 ((char __iomem *) dd->kregbase + in qib_7322_clear_tids()
3798 dd->rcvtidbase + in qib_7322_clear_tids()
3799 ctxt * dd->rcvtidcnt * sizeof(*tidbase)); in qib_7322_clear_tids()
3801 for (i = 0; i < dd->rcvtidcnt; i++) in qib_7322_clear_tids()
3802 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, in qib_7322_clear_tids()
3806 ((char __iomem *) dd->kregbase + in qib_7322_clear_tids()
3807 dd->rcvegrbase + in qib_7322_clear_tids()
3811 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, in qib_7322_clear_tids()
3817 * @dd: the qlogic_ib device
3821 static void qib_7322_tidtemplate(struct qib_devdata *dd) in qib_7322_tidtemplate() argument
3832 if (dd->rcvegrbufsize == 2048) in qib_7322_tidtemplate()
3833 dd->tidtemplate = IBA7322_TID_SZ_2K; in qib_7322_tidtemplate()
3834 else if (dd->rcvegrbufsize == 4096) in qib_7322_tidtemplate()
3835 dd->tidtemplate = IBA7322_TID_SZ_4K; in qib_7322_tidtemplate()
3836 dd->tidinvalid = 0; in qib_7322_tidtemplate()
3854 if (rcd->dd->cspec->r1) in qib_7322_get_base_info()
3856 if (rcd->dd->flags & QIB_USE_SPCL_TRIG) in qib_7322_get_base_info()
3863 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr) in qib_7322_get_msgheader() argument
3868 (rhf_addr - dd->rhf_offset + offset); in qib_7322_get_msgheader()
3874 static void qib_7322_config_ctxts(struct qib_devdata *dd) in qib_7322_config_ctxts() argument
3879 nchipctxts = qib_read_kreg32(dd, kr_contextcnt); in qib_7322_config_ctxts()
3880 dd->cspec->numctxts = nchipctxts; in qib_7322_config_ctxts()
3881 if (qib_n_krcv_queues > 1 && dd->num_pports) { in qib_7322_config_ctxts()
3882 dd->first_user_ctxt = NUM_IB_PORTS + in qib_7322_config_ctxts()
3883 (qib_n_krcv_queues - 1) * dd->num_pports; in qib_7322_config_ctxts()
3884 if (dd->first_user_ctxt > nchipctxts) in qib_7322_config_ctxts()
3885 dd->first_user_ctxt = nchipctxts; in qib_7322_config_ctxts()
3886 dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports; in qib_7322_config_ctxts()
3888 dd->first_user_ctxt = NUM_IB_PORTS; in qib_7322_config_ctxts()
3889 dd->n_krcv_queues = 1; in qib_7322_config_ctxts()
3893 int nctxts = dd->first_user_ctxt + num_online_cpus(); in qib_7322_config_ctxts()
3896 dd->ctxtcnt = 6; in qib_7322_config_ctxts()
3898 dd->ctxtcnt = 10; in qib_7322_config_ctxts()
3900 dd->ctxtcnt = nchipctxts; in qib_7322_config_ctxts()
3901 } else if (qib_cfgctxts < dd->num_pports) in qib_7322_config_ctxts()
3902 dd->ctxtcnt = dd->num_pports; in qib_7322_config_ctxts()
3904 dd->ctxtcnt = qib_cfgctxts; in qib_7322_config_ctxts()
3905 if (!dd->ctxtcnt) /* none of the above, set to max */ in qib_7322_config_ctxts()
3906 dd->ctxtcnt = nchipctxts; in qib_7322_config_ctxts()
3913 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); in qib_7322_config_ctxts()
3914 if (dd->ctxtcnt > 10) in qib_7322_config_ctxts()
3915 dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg); in qib_7322_config_ctxts()
3916 else if (dd->ctxtcnt > 6) in qib_7322_config_ctxts()
3917 dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg); in qib_7322_config_ctxts()
3921 dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode); in qib_7322_config_ctxts()
3927 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); in qib_7322_config_ctxts()
3928 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); in qib_7322_config_ctxts()
3931 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); in qib_7322_config_ctxts()
3933 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt); in qib_7322_config_ctxts()
3935 dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt, in qib_7322_config_ctxts()
3936 dd->num_pports > 1 ? 1024U : 2048U); in qib_7322_config_ctxts()
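
The qib_7322_config_ctxts hits above carry a small piece of arithmetic worth spelling out: with qib_n_krcv_queues kernel receive queues requested per port, the first user context becomes NUM_IB_PORTS + (qib_n_krcv_queues - 1) * num_pports, clamped to the chip's context count, and n_krcv_queues is that value divided by the port count. A tiny standalone check of that arithmetic follows; NUM_IB_PORTS is taken as 2 for this dual-port chip, and the sample inputs are arbitrary values, not anything read from hardware.

/*
 * The context-count arithmetic visible in the qib_7322_config_ctxts()
 * hits, extracted into a standalone check.  NUM_IB_PORTS is assumed to
 * be 2; the inputs below are example values only.
 */
#include <stdio.h>

#define NUM_IB_PORTS 2

int main(void)
{
	unsigned num_pports = 2;
	unsigned qib_n_krcv_queues = 4;	/* example module-parameter value */
	unsigned nchipctxts = 18;	/* example chip context count */

	unsigned first_user_ctxt = NUM_IB_PORTS +
		(qib_n_krcv_queues - 1) * num_pports;
	if (first_user_ctxt > nchipctxts)
		first_user_ctxt = nchipctxts;
	unsigned n_krcv_queues = first_user_ctxt / num_pports;

	/* 2 + 3*2 = 8 kernel contexts, i.e. 4 receive queues per port */
	printf("first_user_ctxt=%u n_krcv_queues=%u\n",
	       first_user_ctxt, n_krcv_queues);
	return 0;
}
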
4044 struct qib_devdata *dd = ppd->dd; in qib_7322_set_ib_cfg() local
4129 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_set_ib_cfg()
4143 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_set_ib_cfg()
4163 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_set_ib_cfg()
4180 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_set_ib_cfg()
4219 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16); in qib_7322_set_ib_cfg()
4250 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n", in qib_7322_set_ib_cfg()
4279 if (ppd->dd->cspec->r1) { in qib_7322_set_ib_cfg()
4292 qib_write_kreg(dd, kr_scratch, 0); in qib_7322_set_ib_cfg()
4307 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", in qib_7322_set_loopback()
4308 ppd->dd->unit, ppd->port); in qib_7322_set_loopback()
4314 qib_devinfo(ppd->dd->pcidev, in qib_7322_set_loopback()
4316 ppd->dd->unit, ppd->port); in qib_7322_set_loopback()
4327 qib_write_kreg(ppd->dd, kr_scratch, 0); in qib_7322_set_loopback()
4362 struct qib_devdata *dd = ppd->dd; in set_vl_weights() local
4365 spin_lock_irqsave(&dd->sendctrl_lock, flags); in set_vl_weights()
4368 qib_write_kreg(dd, kr_scratch, 0); in set_vl_weights()
4369 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in set_vl_weights()
4417 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); in qib_update_7322_usrhead()
4418 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); in qib_update_7322_usrhead()
4419 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); in qib_update_7322_usrhead()
4426 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); in qib_7322_hdrqempty()
4430 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); in qib_7322_hdrqempty()
4460 struct qib_devdata *dd = ppd->dd; in rcvctrl_7322_mod() local
4465 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); in rcvctrl_7322_mod()
4468 dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable); in rcvctrl_7322_mod()
4470 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable); in rcvctrl_7322_mod()
4472 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); in rcvctrl_7322_mod()
4474 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd); in rcvctrl_7322_mod()
4480 mask = (1ULL << dd->ctxtcnt) - 1; in rcvctrl_7322_mod()
4484 rcd = dd->rcd[ctxt]; in rcvctrl_7322_mod()
4489 if (!(dd->flags & QIB_NODMA_RTAIL)) { in rcvctrl_7322_mod()
4491 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); in rcvctrl_7322_mod()
4494 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, in rcvctrl_7322_mod()
4496 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, in rcvctrl_7322_mod()
4504 dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull); in rcvctrl_7322_mod()
4506 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull)); in rcvctrl_7322_mod()
4508 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail)); in rcvctrl_7322_mod()
4510 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail)); in rcvctrl_7322_mod()
4517 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); in rcvctrl_7322_mod()
4520 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) { in rcvctrl_7322_mod()
4527 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); in rcvctrl_7322_mod()
4528 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); in rcvctrl_7322_mod()
4531 (void) qib_read_kreg32(dd, kr_scratch); in rcvctrl_7322_mod()
4532 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); in rcvctrl_7322_mod()
4533 dd->rcd[ctxt]->head = val; in rcvctrl_7322_mod()
4535 if (ctxt < dd->first_user_ctxt) in rcvctrl_7322_mod()
4536 val |= dd->rhdrhead_intr_off; in rcvctrl_7322_mod()
4537 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); in rcvctrl_7322_mod()
4539 dd->rcd[ctxt] && dd->rhdrhead_intr_off) { in rcvctrl_7322_mod()
4541 val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off; in rcvctrl_7322_mod()
4542 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); in rcvctrl_7322_mod()
4549 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0); in rcvctrl_7322_mod()
4550 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0); in rcvctrl_7322_mod()
4552 qib_write_ureg(dd, ur_rcvflowtable + f, in rcvctrl_7322_mod()
4557 for (i = 0; i < dd->cfgctxts; i++) { in rcvctrl_7322_mod()
4558 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, in rcvctrl_7322_mod()
4560 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0); in rcvctrl_7322_mod()
4562 qib_write_ureg(dd, ur_rcvflowtable + f, in rcvctrl_7322_mod()
4567 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); in rcvctrl_7322_mod()
4599 struct qib_devdata *dd = ppd->dd; in sendctrl_7322_mod() local
4603 spin_lock_irqsave(&dd->sendctrl_lock, flags); in sendctrl_7322_mod()
4605 /* First the dd ones that are "sticky", saved in shadow */ in sendctrl_7322_mod()
4607 dd->sendctrl = 0; in sendctrl_7322_mod()
4609 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); in sendctrl_7322_mod()
4611 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); in sendctrl_7322_mod()
4612 if (dd->flags & QIB_USE_SPCL_TRIG) in sendctrl_7322_mod()
4613 dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn); in sendctrl_7322_mod()
4625 tmp_dd_sendctrl = dd->sendctrl; in sendctrl_7322_mod()
4626 last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; in sendctrl_7322_mod()
4633 qib_write_kreg(dd, kr_sendctrl, in sendctrl_7322_mod()
4636 qib_write_kreg(dd, kr_scratch, 0); in sendctrl_7322_mod()
4652 qib_write_kreg(dd, kr_scratch, 0); in sendctrl_7322_mod()
4655 tmp_dd_sendctrl = dd->sendctrl; in sendctrl_7322_mod()
4662 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) in sendctrl_7322_mod()
4666 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); in sendctrl_7322_mod()
4667 qib_write_kreg(dd, kr_scratch, 0); in sendctrl_7322_mod()
4672 qib_write_kreg(dd, kr_scratch, 0); in sendctrl_7322_mod()
4676 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); in sendctrl_7322_mod()
4677 qib_write_kreg(dd, kr_scratch, 0); in sendctrl_7322_mod()
4680 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in sendctrl_7322_mod()
4690 v = qib_read_kreg32(dd, kr_scratch); in sendctrl_7322_mod()
4691 qib_write_kreg(dd, kr_scratch, v); in sendctrl_7322_mod()
4692 v = qib_read_kreg32(dd, kr_scratch); in sendctrl_7322_mod()
4693 qib_write_kreg(dd, kr_scratch, v); in sendctrl_7322_mod()
4694 qib_read_kreg32(dd, kr_scratch); in sendctrl_7322_mod()
4709 struct qib_devdata *dd = ppd->dd; in qib_portcntr_7322() local
4757 qib_devinfo(ppd->dd->pcidev, in qib_portcntr_7322()
4768 for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) { in qib_portcntr_7322()
4769 struct qib_ctxtdata *rcd = dd->rcd[i]; in qib_portcntr_7322()
4773 ret += read_7322_creg32(dd, cr_base_egrovfl + i); in qib_portcntr_7322()
4965 static void init_7322_cntrnames(struct qib_devdata *dd) in init_7322_cntrnames() argument
4970 for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts; in init_7322_cntrnames()
4979 dd->cspec->ncntrs = i; in init_7322_cntrnames()
4982 dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1; in init_7322_cntrnames()
4984 dd->cspec->cntrnamelen = 1 + s - cntr7322names; in init_7322_cntrnames()
4985 dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64), in init_7322_cntrnames()
4990 dd->cspec->nportcntrs = i - 1; in init_7322_cntrnames()
4991 dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1; in init_7322_cntrnames()
4992 for (i = 0; i < dd->num_pports; ++i) { in init_7322_cntrnames()
4993 dd->pport[i].cpspec->portcntrs = in init_7322_cntrnames()
4994 kmalloc_array(dd->cspec->nportcntrs, sizeof(u64), in init_7322_cntrnames()
4999 static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep, in qib_read_7322cntrs() argument
5005 ret = dd->cspec->cntrnamelen; in qib_read_7322cntrs()
5011 u64 *cntr = dd->cspec->cntrs; in qib_read_7322cntrs()
5014 ret = dd->cspec->ncntrs * sizeof(u64); in qib_read_7322cntrs()
5021 for (i = 0; i < dd->cspec->ncntrs; i++) in qib_read_7322cntrs()
5023 *cntr++ = read_7322_creg(dd, in qib_read_7322cntrs()
5027 *cntr++ = read_7322_creg32(dd, in qib_read_7322cntrs()
5034 static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port, in qib_read_7322portcntrs() argument
5040 ret = dd->cspec->portcntrnamelen; in qib_read_7322portcntrs()
5046 struct qib_pportdata *ppd = &dd->pport[port]; in qib_read_7322portcntrs()
5050 ret = dd->cspec->nportcntrs * sizeof(u64); in qib_read_7322portcntrs()
5057 for (i = 0; i < dd->cspec->nportcntrs; i++) { in qib_read_7322portcntrs()
5088 struct qib_devdata *dd = from_timer(dd, t, stats_timer); in qib_get_7322_faststats() local
5094 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_get_7322_faststats()
5095 ppd = dd->pport + pidx; in qib_get_7322_faststats()
5102 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED) in qib_get_7322_faststats()
5103 || dd->diag_client) in qib_get_7322_faststats()
5113 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); in qib_get_7322_faststats()
5114 traffic_wds -= ppd->dd->traffic_wds; in qib_get_7322_faststats()
5115 ppd->dd->traffic_wds += traffic_wds; in qib_get_7322_faststats()
5116 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); in qib_get_7322_faststats()
5126 ppd->dd->cspec->r1 ? in qib_get_7322_faststats()
5132 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); in qib_get_7322_faststats()
5138 static int qib_7322_intr_fallback(struct qib_devdata *dd) in qib_7322_intr_fallback() argument
5140 if (!dd->cspec->num_msix_entries) in qib_7322_intr_fallback()
5143 qib_devinfo(dd->pcidev, in qib_7322_intr_fallback()
5145 qib_7322_free_irq(dd); in qib_7322_intr_fallback()
5146 if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0) in qib_7322_intr_fallback()
5147 qib_dev_err(dd, "Failed to enable INTx\n"); in qib_7322_intr_fallback()
5148 qib_setup_7322_interrupt(dd, 0); in qib_7322_intr_fallback()
5164 struct qib_devdata *dd = ppd->dd; in qib_7322_mini_pcs_reset() local
5170 qib_write_kreg(dd, kr_hwerrmask, in qib_7322_mini_pcs_reset()
5171 dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop)); in qib_7322_mini_pcs_reset()
5177 qib_read_kreg32(dd, kr_scratch); in qib_7322_mini_pcs_reset()
5180 qib_write_kreg(dd, kr_scratch, 0ULL); in qib_7322_mini_pcs_reset()
5181 qib_write_kreg(dd, kr_hwerrclear, in qib_7322_mini_pcs_reset()
5183 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); in qib_7322_mini_pcs_reset()
5201 struct qib_devdata *dd = ppd->dd; in autoneg_7322_sendpkt() local
5213 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL); in autoneg_7322_sendpkt()
5218 if (dd->flags & QIB_USE_SPCL_TRIG) { in autoneg_7322_sendpkt()
5219 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023; in autoneg_7322_sendpkt()
5225 qib_sendbuf_done(dd, pnum); in autoneg_7322_sendpkt()
5227 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL); in autoneg_7322_sendpkt()
5235 struct qib_devdata *dd = ppd->dd; in qib_autoneg_7322_send() local
5270 qib_read_kreg64(dd, kr_scratch); in qib_autoneg_7322_send()
5273 qib_read_kreg64(dd, kr_scratch); in qib_autoneg_7322_send()
5314 qib_write_kreg(ppd->dd, kr_scratch, 0); in set_7322_ibspeed_fast()
5561 if (ppd->dd->flags & QIB_HAS_QSFP) { in qib_7322_ib_updown()
5629 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10) in qib_7322_ib_updown()
5672 static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask) in gpio_7322_mod() argument
5681 spin_lock_irqsave(&dd->cspec->gpio_lock, flags); in gpio_7322_mod()
5682 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); in gpio_7322_mod()
5683 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); in gpio_7322_mod()
5684 new_out = (dd->cspec->gpio_out & ~mask) | out; in gpio_7322_mod()
5686 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); in gpio_7322_mod()
5687 qib_write_kreg(dd, kr_gpio_out, new_out); in gpio_7322_mod()
5688 dd->cspec->gpio_out = new_out; in gpio_7322_mod()
5689 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); in gpio_7322_mod()
5699 read_val = qib_read_kreg64(dd, kr_extstatus); in gpio_7322_mod()
5704 static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen) in qib_7322_eeprom_wen() argument
5710 prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM; in qib_7322_eeprom_wen()
5711 gpio_7322_mod(dd, wen ? 0 : mask, mask, mask); in qib_7322_eeprom_wen()
5721 static void get_7322_chip_params(struct qib_devdata *dd) in get_7322_chip_params() argument
5727 dd->palign = qib_read_kreg32(dd, kr_pagealign); in get_7322_chip_params()
5729 dd->uregbase = qib_read_kreg32(dd, kr_userregbase); in get_7322_chip_params()
5731 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); in get_7322_chip_params()
5732 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); in get_7322_chip_params()
5733 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); in get_7322_chip_params()
5734 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); in get_7322_chip_params()
5735 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; in get_7322_chip_params()
5737 val = qib_read_kreg64(dd, kr_sendpiobufcnt); in get_7322_chip_params()
5738 dd->piobcnt2k = val & ~0U; in get_7322_chip_params()
5739 dd->piobcnt4k = val >> 32; in get_7322_chip_params()
5740 val = qib_read_kreg64(dd, kr_sendpiosize); in get_7322_chip_params()
5741 dd->piosize2k = val & ~0U; in get_7322_chip_params()
5742 dd->piosize4k = val >> 32; in get_7322_chip_params()
5747 dd->pport[0].ibmtu = (u32)mtu; in get_7322_chip_params()
5748 dd->pport[1].ibmtu = (u32)mtu; in get_7322_chip_params()
5751 dd->pio2kbase = (u32 __iomem *) in get_7322_chip_params()
5752 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase); in get_7322_chip_params()
5753 dd->pio4kbase = (u32 __iomem *) in get_7322_chip_params()
5754 ((char __iomem *) dd->kregbase + in get_7322_chip_params()
5755 (dd->piobufbase >> 32)); in get_7322_chip_params()
5761 dd->align4k = ALIGN(dd->piosize4k, dd->palign); in get_7322_chip_params()
5763 piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS; in get_7322_chip_params()
5765 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / in get_7322_chip_params()
5774 static void qib_7322_set_baseaddrs(struct qib_devdata *dd) in qib_7322_set_baseaddrs() argument
5778 cregbase = qib_read_kreg32(dd, kr_counterregbase); in qib_7322_set_baseaddrs()
5780 dd->cspec->cregbase = (u64 __iomem *)(cregbase + in qib_7322_set_baseaddrs()
5781 (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5783 dd->egrtidbase = (u64 __iomem *) in qib_7322_set_baseaddrs()
5784 ((char __iomem *) dd->kregbase + dd->rcvegrbase); in qib_7322_set_baseaddrs()
5787 dd->pport[0].cpspec->kpregbase = in qib_7322_set_baseaddrs()
5788 (u64 __iomem *)((char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5789 dd->pport[1].cpspec->kpregbase = in qib_7322_set_baseaddrs()
5790 (u64 __iomem *)(dd->palign + in qib_7322_set_baseaddrs()
5791 (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5792 dd->pport[0].cpspec->cpregbase = in qib_7322_set_baseaddrs()
5793 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0], in qib_7322_set_baseaddrs()
5794 kr_counterregbase) + (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5795 dd->pport[1].cpspec->cpregbase = in qib_7322_set_baseaddrs()
5796 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1], in qib_7322_set_baseaddrs()
5797 kr_counterregbase) + (char __iomem *)dd->kregbase); in qib_7322_set_baseaddrs()
5813 static int sendctrl_hook(struct qib_devdata *dd, in sendctrl_hook() argument
5829 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in sendctrl_hook()
5833 ppd = dd->pport + pidx; in sendctrl_hook()
5838 psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr); in sendctrl_hook()
5844 if (pidx >= dd->num_pports) in sendctrl_hook()
5854 spin_lock_irqsave(&dd->sendctrl_lock, flags); in sendctrl_hook()
5864 local_data = (u64)qib_read_kreg32(dd, idx); in sendctrl_hook()
5866 local_data = qib_read_kreg64(dd, idx); in sendctrl_hook()
5887 qib_write_kreg(dd, idx, tval); in sendctrl_hook()
5888 qib_write_kreg(dd, kr_scratch, 0Ull); in sendctrl_hook()
5890 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in sendctrl_hook()
5956 if (!ret && !ppd->dd->cspec->r1) { in qsfp_7322_event()
5998 struct qib_devdata *dd = ppd->dd; in qib_init_7322_qsfp() local
6004 spin_lock_irqsave(&dd->cspec->gpio_lock, flags); in qib_init_7322_qsfp()
6005 dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert)); in qib_init_7322_qsfp()
6006 dd->cspec->gpio_mask |= mod_prs_bit; in qib_init_7322_qsfp()
6007 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); in qib_init_7322_qsfp()
6008 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); in qib_init_7322_qsfp()
6009 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); in qib_init_7322_qsfp()
6025 static void set_no_qsfp_atten(struct qib_devdata *dd, int change) in set_no_qsfp_atten() argument
6037 for (pidx = 0; pidx < dd->num_pports; ++pidx) in set_no_qsfp_atten()
6038 dd->pport[pidx].cpspec->no_eep = deflt; in set_no_qsfp_atten()
6041 if (IS_QME(dd) || IS_QMH(dd)) in set_no_qsfp_atten()
6079 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; in set_no_qsfp_atten()
6081 struct qib_pportdata *ppd = &dd->pport[pidx]; in set_no_qsfp_atten()
6093 if (IS_QMH(dd) || IS_QME(dd)) in set_no_qsfp_atten()
6106 for (pidx = 0; pidx < dd->num_pports; ++pidx) in set_no_qsfp_atten()
6107 if (dd->pport[pidx].link_speed_supported) in set_no_qsfp_atten()
6108 init_txdds_table(&dd->pport[pidx], 0); in set_no_qsfp_atten()
6115 struct qib_devdata *dd; in setup_txselect() local
6132 xa_for_each(&qib_dev_table, index, dd) in setup_txselect()
6133 if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) in setup_txselect()
6134 set_no_qsfp_atten(dd, 1); in setup_txselect()
6143 static int qib_late_7322_initreg(struct qib_devdata *dd) in qib_late_7322_initreg() argument
6148 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); in qib_late_7322_initreg()
6149 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); in qib_late_7322_initreg()
6150 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); in qib_late_7322_initreg()
6151 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); in qib_late_7322_initreg()
6152 val = qib_read_kreg64(dd, kr_sendpioavailaddr); in qib_late_7322_initreg()
6153 if (val != dd->pioavailregs_phys) { in qib_late_7322_initreg()
6154 qib_dev_err(dd, in qib_late_7322_initreg()
6156 (unsigned long) dd->pioavailregs_phys, in qib_late_7322_initreg()
6161 n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; in qib_late_7322_initreg()
6162 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL); in qib_late_7322_initreg()
6164 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL); in qib_late_7322_initreg()
6166 qib_register_observer(dd, &sendctrl_0_observer); in qib_late_7322_initreg()
6167 qib_register_observer(dd, &sendctrl_1_observer); in qib_late_7322_initreg()
6169 dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN; in qib_late_7322_initreg()
6170 qib_write_kreg(dd, kr_control, dd->control); in qib_late_7322_initreg()
6177 set_no_qsfp_atten(dd, 0); in qib_late_7322_initreg()
6178 for (n = 0; n < dd->num_pports; ++n) { in qib_late_7322_initreg()
6179 struct qib_pportdata *ppd = dd->pport + n; in qib_late_7322_initreg()
6184 if (dd->flags & QIB_HAS_QSFP) in qib_late_7322_initreg()
6187 dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN; in qib_late_7322_initreg()
6188 qib_write_kreg(dd, kr_control, dd->control); in qib_late_7322_initreg()
6217 qib_write_kreg(ppd->dd, kr_scratch, 0); in write_7322_init_portregs()
6248 if (ppd->dd->cspec->r1) in write_7322_init_portregs()
6259 static void write_7322_initregs(struct qib_devdata *dd) in write_7322_initregs() argument
6266 qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1); in write_7322_initregs()
6268 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in write_7322_initregs()
6272 if (dd->n_krcv_queues < 2 || in write_7322_initregs()
6273 !dd->pport[pidx].link_speed_supported) in write_7322_initregs()
6276 ppd = &dd->pport[pidx]; in write_7322_initregs()
6279 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); in write_7322_initregs()
6281 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); in write_7322_initregs()
6286 if (dd->num_pports > 1) in write_7322_initregs()
6287 n = dd->first_user_ctxt / dd->num_pports; in write_7322_initregs()
6289 n = dd->first_user_ctxt - 1; in write_7322_initregs()
6293 if (dd->num_pports > 1) in write_7322_initregs()
6294 ctxt = (i % n) * dd->num_pports + pidx; in write_7322_initregs()
6316 for (i = 0; i < dd->first_user_ctxt; i++) { in write_7322_initregs()
6317 dd->cspec->rcvavail_timeout[i] = rcv_int_timeout; in write_7322_initregs()
6318 qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout); in write_7322_initregs()
6327 for (i = 0; i < dd->cfgctxts; i++) { in write_7322_initregs()
6331 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); in write_7322_initregs()
6339 if (dd->num_pports) in write_7322_initregs()
6340 setup_7322_link_recovery(dd->pport, dd->num_pports > 1); in write_7322_initregs()
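write_7322_initregs() spreads the kernel receive contexts across the ports with ctxt = (i % n) * num_pports + pidx, so a two-port chip interleaves contexts 0,2,4,... onto port 0 and 1,3,5,... onto port 1. A small standalone sketch of that interleaving (ctxt_for and NUM_PORTS are illustrative names):

    #include <stdio.h>

    #define NUM_PORTS 2

    /* Interleave kernel contexts across ports: with n contexts per port,
     * port 0 gets even context numbers and port 1 gets odd ones. */
    static int ctxt_for(int i, int n, int pidx)
    {
        return (i % n) * NUM_PORTS + pidx;
    }

    int main(void)
    {
        int n = 4;      /* contexts per port, analogous to first_user_ctxt / num_pports */

        for (int pidx = 0; pidx < NUM_PORTS; pidx++)
            for (int i = 0; i < n; i++)
                printf("port %d, slot %d -> ctxt %d\n",
                       pidx, i, ctxt_for(i, n, pidx));
        return 0;
    }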
6343 static int qib_init_7322_variables(struct qib_devdata *dd) in qib_init_7322_variables() argument
6352 ppd = (struct qib_pportdata *)(dd + 1); in qib_init_7322_variables()
6353 dd->pport = ppd; in qib_init_7322_variables()
6354 ppd[0].dd = dd; in qib_init_7322_variables()
6355 ppd[1].dd = dd; in qib_init_7322_variables()
6357 dd->cspec = (struct qib_chip_specific *)(ppd + 2); in qib_init_7322_variables()
6359 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1); in qib_init_7322_variables()
6364 spin_lock_init(&dd->cspec->rcvmod_lock); in qib_init_7322_variables()
6365 spin_lock_init(&dd->cspec->gpio_lock); in qib_init_7322_variables()
6368 dd->revision = readq(&dd->kregbase[kr_revision]); in qib_init_7322_variables()
6370 if ((dd->revision & 0xffffffffU) == 0xffffffffU) { in qib_init_7322_variables()
6371 qib_dev_err(dd, in qib_init_7322_variables()
6376 dd->flags |= QIB_PRESENT; /* now register routines work */ in qib_init_7322_variables()
6378 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor); in qib_init_7322_variables()
6379 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor); in qib_init_7322_variables()
6380 dd->cspec->r1 = dd->minrev == 1; in qib_init_7322_variables()
6382 get_7322_chip_params(dd); in qib_init_7322_variables()
6383 features = qib_7322_boardname(dd); in qib_init_7322_variables()
6386 sbufcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; in qib_init_7322_variables()
6388 dd->cspec->sendchkenable = bitmap_zalloc(sbufcnt, GFP_KERNEL); in qib_init_7322_variables()
6389 dd->cspec->sendgrhchk = bitmap_zalloc(sbufcnt, GFP_KERNEL); in qib_init_7322_variables()
6390 dd->cspec->sendibchk = bitmap_zalloc(sbufcnt, GFP_KERNEL); in qib_init_7322_variables()
6391 if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk || in qib_init_7322_variables()
6392 !dd->cspec->sendibchk) { in qib_init_7322_variables()
6397 ppd = dd->pport; in qib_init_7322_variables()
6403 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; in qib_init_7322_variables()
6404 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; in qib_init_7322_variables()
6405 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; in qib_init_7322_variables()
6407 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | in qib_init_7322_variables()
6411 dd->flags |= qib_special_trigger ? in qib_init_7322_variables()
6418 qib_7322_set_baseaddrs(dd); in qib_init_7322_variables()
6424 dd->cspec->int_enable_mask = QIB_I_BITSEXTANT; in qib_init_7322_variables()
6426 dd->cspec->hwerrmask = ~0ULL; in qib_init_7322_variables()
6429 dd->cspec->hwerrmask &= in qib_init_7322_variables()
6441 dd->skip_kctxt_mask |= 1 << pidx; in qib_init_7322_variables()
6447 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, in qib_init_7322_variables()
6451 dd->cspec->int_enable_mask &= ~( in qib_init_7322_variables()
6462 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, in qib_init_7322_variables()
6466 dd->cspec->int_enable_mask &= ~( in qib_init_7322_variables()
6477 dd->num_pports++; in qib_init_7322_variables()
6478 ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports); in qib_init_7322_variables()
6480 dd->num_pports--; in qib_init_7322_variables()
6502 qib_devinfo(dd->pcidev, in qib_init_7322_variables()
6514 qib_devinfo(dd->pcidev, in qib_init_7322_variables()
6527 if (ppd->dd->cspec->r1) in qib_init_7322_variables()
6535 if (!(dd->flags & QIB_HAS_QSFP)) { in qib_init_7322_variables()
6536 if (!IS_QMH(dd) && !IS_QME(dd)) in qib_init_7322_variables()
6537 qib_devinfo(dd->pcidev, in qib_init_7322_variables()
6539 dd->unit, ppd->port); in qib_init_7322_variables()
6540 cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; in qib_init_7322_variables()
6545 ppd->cpspec->no_eep = IS_QMH(dd) ? in qib_init_7322_variables()
6559 dd->rcvhdrentsize = qib_rcvhdrentsize ? in qib_init_7322_variables()
6561 dd->rcvhdrsize = qib_rcvhdrsize ? in qib_init_7322_variables()
6563 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); in qib_init_7322_variables()
6566 dd->rcvegrbufsize = max(mtu, 2048); in qib_init_7322_variables()
6567 dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize); in qib_init_7322_variables()
6569 qib_7322_tidtemplate(dd); in qib_init_7322_variables()
6575 dd->rhdrhead_intr_off = in qib_init_7322_variables()
6579 timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0); in qib_init_7322_variables()
6581 dd->ureg_align = 0x10000; /* 64KB alignment */ in qib_init_7322_variables()
6583 dd->piosize2kmax_dwords = dd->piosize2k >> 2; in qib_init_7322_variables()
6585 qib_7322_config_ctxts(dd); in qib_init_7322_variables()
6586 qib_set_ctxtcnt(dd); in qib_init_7322_variables()
6595 ret = init_chip_wc_pat(dd, 0); in qib_init_7322_variables()
6600 vl15off = dd->physaddr + (dd->piobufbase >> 32) + in qib_init_7322_variables()
6601 dd->piobcnt4k * dd->align4k; in qib_init_7322_variables()
6602 dd->piovl15base = ioremap(vl15off, in qib_init_7322_variables()
6603 NUM_VL15_BUFS * dd->align4k); in qib_init_7322_variables()
6604 if (!dd->piovl15base) { in qib_init_7322_variables()
6609 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ in qib_init_7322_variables()
6614 if (!dd->num_pports) { in qib_init_7322_variables()
6615 qib_dev_err(dd, "No ports enabled, giving up initialization\n"); in qib_init_7322_variables()
6619 write_7322_initregs(dd); in qib_init_7322_variables()
6620 ret = qib_create_ctxts(dd); in qib_init_7322_variables()
6621 init_7322_cntrnames(dd); in qib_init_7322_variables()
6635 if (dd->flags & QIB_HAS_SEND_DMA) { in qib_init_7322_variables()
6636 dd->cspec->sdmabufcnt = dd->piobcnt4k; in qib_init_7322_variables()
6639 dd->cspec->sdmabufcnt = 0; in qib_init_7322_variables()
6640 sbufs = dd->piobcnt4k; in qib_init_7322_variables()
6642 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k - in qib_init_7322_variables()
6643 dd->cspec->sdmabufcnt; in qib_init_7322_variables()
6644 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; in qib_init_7322_variables()
6645 dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ in qib_init_7322_variables()
6646 dd->last_pio = dd->cspec->lastbuf_for_pio; in qib_init_7322_variables()
6647 dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ? in qib_init_7322_variables()
6648 dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0; in qib_init_7322_variables()
6656 if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh) in qib_init_7322_variables()
6657 updthresh = dd->pbufsctxt - 2; in qib_init_7322_variables()
6658 dd->cspec->updthresh_dflt = updthresh; in qib_init_7322_variables()
6659 dd->cspec->updthresh = updthresh; in qib_init_7322_variables()
6662 dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) in qib_init_7322_variables()
6666 dd->psxmitwait_supported = 1; in qib_init_7322_variables()
6667 dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE; in qib_init_7322_variables()
6669 if (!dd->ctxtcnt) in qib_init_7322_variables()
6670 dd->ctxtcnt = 1; /* for other initialization code */ in qib_init_7322_variables()
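qib_init_7322_variables() allocates three per-send-buffer bitmaps (sendchkenable, sendgrhchk, sendibchk) and treats the trio as all-or-nothing: if any allocation fails, initialization bails out. A hedged userspace sketch of that pattern, using calloc in place of bitmap_zalloc():

    #include <stdlib.h>

    struct chip_specific {
        unsigned long *sendchkenable;
        unsigned long *sendgrhchk;
        unsigned long *sendibchk;
    };

    /* Allocate all three check bitmaps; free whatever succeeded and fail
     * if any single allocation came back NULL. */
    static int alloc_check_bitmaps(struct chip_specific *cs, size_t nbufs)
    {
        size_t words = (nbufs + 63) / 64;

        cs->sendchkenable = calloc(words, sizeof(unsigned long));
        cs->sendgrhchk    = calloc(words, sizeof(unsigned long));
        cs->sendibchk     = calloc(words, sizeof(unsigned long));
        if (!cs->sendchkenable || !cs->sendgrhchk || !cs->sendibchk) {
            free(cs->sendchkenable);
            free(cs->sendgrhchk);
            free(cs->sendibchk);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct chip_specific cs;

        return alloc_check_bitmaps(&cs, 160) ? 1 : 0;
    }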
6679 struct qib_devdata *dd = ppd->dd; in qib_7322_getsendbuf() local
6683 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx; in qib_7322_getsendbuf()
6686 if ((plen + 1) > dd->piosize2kmax_dwords) in qib_7322_getsendbuf()
6687 first = dd->piobcnt2k; in qib_7322_getsendbuf()
6690 last = dd->cspec->lastbuf_for_pio; in qib_7322_getsendbuf()
6692 return qib_getsendbuf_range(dd, pbufnum, first, last); in qib_7322_getsendbuf()
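qib_7322_getsendbuf() picks the PIO buffer search range from the packet: VL15 packets use the per-port reserved buffers past piobcnt2k + piobcnt4k, and anything that will not fit a 2 KB buffer starts the search at the 4 KB pool. A minimal sketch of that range selection; the constants and first_buf() are illustrative only:

    #include <stdio.h>

    #define PIOBCNT2K 128
    #define PIOBCNT4K 32
    #define PIO2K_MAX_DWORDS (2048 / 4)

    /* First candidate buffer index for a send of plen dwords.  VL15 uses
     * the per-port reserved buffer; oversized packets skip the 2 KB pool. */
    static unsigned first_buf(unsigned plen, int vl15, int hw_port)
    {
        if (vl15)
            return PIOBCNT2K + PIOBCNT4K + hw_port;
        if (plen + 1 > PIO2K_MAX_DWORDS)
            return PIOBCNT2K;
        return 0;
    }

    int main(void)
    {
        printf("small: %u, big: %u, vl15: %u\n",
               first_buf(64, 0, 0), first_buf(600, 0, 0), first_buf(16, 1, 1));
        return 0;
    }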
6718 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6722 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6726 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6732 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6744 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6751 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6756 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6760 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6764 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6768 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6772 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6776 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6780 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6784 qib_dev_porterr(ppd->dd, ppd->port, in dump_sdma_7322_state()
6844 struct qib_devdata *dd = ppd->dd; in init_sdma_7322_regs() local
6856 if (dd->num_pports) in init_sdma_7322_regs()
6857 n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */ in init_sdma_7322_regs()
6859 n = dd->cspec->sdmabufcnt; /* failsafe for init */ in init_sdma_7322_regs()
6860 erstbuf = (dd->piobcnt2k + dd->piobcnt4k) - in init_sdma_7322_regs()
6861 ((dd->num_pports == 1 || ppd->port == 2) ? n : in init_sdma_7322_regs()
6862 dd->cspec->sdmabufcnt); in init_sdma_7322_regs()
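My reading of the init_sdma_7322_regs() lines above is that the SDMA buffers sit at the top of the PIO buffer space and are split evenly between the ports, with port 2 (or a single-port chip) taking the upper slice. A hedged sketch of that partitioning under those assumptions; first_sdma_buf and the counts are made up for illustration:

    #include <stdio.h>

    #define PIOBCNT2K  128
    #define PIOBCNT4K  32
    #define SDMABUFCNT 32       /* assumed: all 4 KB buffers reserved for SDMA */
    #define NUM_PORTS  2

    /* First SDMA buffer for a port: the SDMA region ends at the top of the
     * PIO space; port 2 takes the upper half, port 1 the lower half. */
    static unsigned first_sdma_buf(int port)
    {
        unsigned n = SDMABUFCNT / NUM_PORTS;
        unsigned end = PIOBCNT2K + PIOBCNT4K;

        return end - ((NUM_PORTS == 1 || port == 2) ? n : SDMABUFCNT);
    }

    int main(void)
    {
        printf("port1 starts at %u, port2 at %u\n",
               first_sdma_buf(1), first_sdma_buf(2));
        return 0;
    }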
6882 struct qib_devdata *dd = ppd->dd; in qib_sdma_7322_gethead() local
6891 (dd->flags & QIB_HAS_SDMA_TIMEOUT); in qib_sdma_7322_gethead()
6967 static void qib_7322_initvl15_bufs(struct qib_devdata *dd) in qib_7322_initvl15_bufs() argument
6971 vl15bufs = dd->piobcnt2k + dd->piobcnt4k; in qib_7322_initvl15_bufs()
6972 qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS, in qib_7322_initvl15_bufs()
6979 if (rcd->dd->num_pports > 1) { in qib_7322_init_ctxt()
6987 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; in qib_7322_init_ctxt()
6994 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, in qib_7322_txchk_change() argument
7021 le64_to_cpu(dd->pioavailregs_dma[i]); in qib_7322_txchk_change()
7036 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_7322_txchk_change()
7048 clear_bit(i, dd->cspec->sendchkenable); in qib_7322_txchk_change()
7058 qib_read_kreg32(dd, kr_scratch); in qib_7322_txchk_change()
7060 set_bit(i, dd->cspec->sendchkenable); in qib_7322_txchk_change()
7066 set_bit(i, dd->cspec->sendibchk); in qib_7322_txchk_change()
7067 clear_bit(i, dd->cspec->sendgrhchk); in qib_7322_txchk_change()
7069 spin_lock_irqsave(&dd->uctxt_lock, flags); in qib_7322_txchk_change()
7071 for (i = dd->first_user_ctxt; in qib_7322_txchk_change()
7072 dd->cspec->updthresh != dd->cspec->updthresh_dflt in qib_7322_txchk_change()
7073 && i < dd->cfgctxts; i++) in qib_7322_txchk_change()
7074 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt && in qib_7322_txchk_change()
7075 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1) in qib_7322_txchk_change()
7076 < dd->cspec->updthresh_dflt) in qib_7322_txchk_change()
7078 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in qib_7322_txchk_change()
7079 if (i == dd->cfgctxts) { in qib_7322_txchk_change()
7080 spin_lock_irqsave(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7081 dd->cspec->updthresh = dd->cspec->updthresh_dflt; in qib_7322_txchk_change()
7082 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); in qib_7322_txchk_change()
7083 dd->sendctrl |= (dd->cspec->updthresh & in qib_7322_txchk_change()
7086 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7087 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_7322_txchk_change()
7094 clear_bit(i, dd->cspec->sendibchk); in qib_7322_txchk_change()
7095 set_bit(i, dd->cspec->sendgrhchk); in qib_7322_txchk_change()
7097 spin_lock_irqsave(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7099 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) { in qib_7322_txchk_change()
7100 dd->cspec->updthresh = (rcd->piocnt / in qib_7322_txchk_change()
7102 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); in qib_7322_txchk_change()
7103 dd->sendctrl |= (dd->cspec->updthresh & in qib_7322_txchk_change()
7106 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7107 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_7322_txchk_change()
7109 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in qib_7322_txchk_change()
7117 qib_write_kreg(dd, kr_sendcheckmask + i, in qib_7322_txchk_change()
7118 dd->cspec->sendchkenable[i]); in qib_7322_txchk_change()
7121 qib_write_kreg(dd, kr_sendgrhcheckmask + i, in qib_7322_txchk_change()
7122 dd->cspec->sendgrhchk[i]); in qib_7322_txchk_change()
7123 qib_write_kreg(dd, kr_sendibpktmask + i, in qib_7322_txchk_change()
7124 dd->cspec->sendibchk[i]); in qib_7322_txchk_change()
7131 qib_read_kreg32(dd, kr_scratch); in qib_7322_txchk_change()
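qib_7322_txchk_change() keeps the check state in software bitmaps and, at the end, flushes them to the chip one 64-buffer mask register at a time, then reads the scratch register to push out the posted writes. A sketch of that bitmap-to-register flush; write_mask_reg() is a hypothetical stand-in for qib_write_kreg():

    #include <stdint.h>
    #include <stdio.h>

    #define NBUFS 160
    #define WORDS ((NBUFS + 63) / 64)

    static uint64_t hw_mask[WORDS];          /* stands in for kr_sendcheckmask.. */

    static void write_mask_reg(unsigned idx, uint64_t val)
    {
        hw_mask[idx] = val;                  /* real driver: qib_write_kreg() */
    }

    /* Push each 64-buffer chunk of the software bitmap to its mask register. */
    static void flush_check_bitmap(const uint64_t *bitmap)
    {
        for (unsigned i = 0; i < WORDS; i++)
            write_mask_reg(i, bitmap[i]);
        /* the driver then reads a scratch register to flush posted writes */
    }

    int main(void)
    {
        uint64_t bm[WORDS] = { 0xffULL, 0x1ULL, 0 };

        flush_check_bitmap(bm);
        printf("reg0=%#llx\n", (unsigned long long)hw_mask[0]);
        return 0;
    }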
7136 static void writescratch(struct qib_devdata *dd, u32 val) in writescratch() argument
7138 qib_write_kreg(dd, kr_scratch, val); in writescratch()
7142 static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum) in qib_7322_tempsense_rd() argument
7161 struct qib_devdata *dd; in qib_init_iba7322_funcs() local
7165 dd = qib_alloc_devdata(pdev, in qib_init_iba7322_funcs()
7169 if (IS_ERR(dd)) in qib_init_iba7322_funcs()
7172 dd->f_bringup_serdes = qib_7322_bringup_serdes; in qib_init_iba7322_funcs()
7173 dd->f_cleanup = qib_setup_7322_cleanup; in qib_init_iba7322_funcs()
7174 dd->f_clear_tids = qib_7322_clear_tids; in qib_init_iba7322_funcs()
7175 dd->f_free_irq = qib_7322_free_irq; in qib_init_iba7322_funcs()
7176 dd->f_get_base_info = qib_7322_get_base_info; in qib_init_iba7322_funcs()
7177 dd->f_get_msgheader = qib_7322_get_msgheader; in qib_init_iba7322_funcs()
7178 dd->f_getsendbuf = qib_7322_getsendbuf; in qib_init_iba7322_funcs()
7179 dd->f_gpio_mod = gpio_7322_mod; in qib_init_iba7322_funcs()
7180 dd->f_eeprom_wen = qib_7322_eeprom_wen; in qib_init_iba7322_funcs()
7181 dd->f_hdrqempty = qib_7322_hdrqempty; in qib_init_iba7322_funcs()
7182 dd->f_ib_updown = qib_7322_ib_updown; in qib_init_iba7322_funcs()
7183 dd->f_init_ctxt = qib_7322_init_ctxt; in qib_init_iba7322_funcs()
7184 dd->f_initvl15_bufs = qib_7322_initvl15_bufs; in qib_init_iba7322_funcs()
7185 dd->f_intr_fallback = qib_7322_intr_fallback; in qib_init_iba7322_funcs()
7186 dd->f_late_initreg = qib_late_7322_initreg; in qib_init_iba7322_funcs()
7187 dd->f_setpbc_control = qib_7322_setpbc_control; in qib_init_iba7322_funcs()
7188 dd->f_portcntr = qib_portcntr_7322; in qib_init_iba7322_funcs()
7189 dd->f_put_tid = qib_7322_put_tid; in qib_init_iba7322_funcs()
7190 dd->f_quiet_serdes = qib_7322_mini_quiet_serdes; in qib_init_iba7322_funcs()
7191 dd->f_rcvctrl = rcvctrl_7322_mod; in qib_init_iba7322_funcs()
7192 dd->f_read_cntrs = qib_read_7322cntrs; in qib_init_iba7322_funcs()
7193 dd->f_read_portcntrs = qib_read_7322portcntrs; in qib_init_iba7322_funcs()
7194 dd->f_reset = qib_do_7322_reset; in qib_init_iba7322_funcs()
7195 dd->f_init_sdma_regs = init_sdma_7322_regs; in qib_init_iba7322_funcs()
7196 dd->f_sdma_busy = qib_sdma_7322_busy; in qib_init_iba7322_funcs()
7197 dd->f_sdma_gethead = qib_sdma_7322_gethead; in qib_init_iba7322_funcs()
7198 dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl; in qib_init_iba7322_funcs()
7199 dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt; in qib_init_iba7322_funcs()
7200 dd->f_sdma_update_tail = qib_sdma_update_7322_tail; in qib_init_iba7322_funcs()
7201 dd->f_sendctrl = sendctrl_7322_mod; in qib_init_iba7322_funcs()
7202 dd->f_set_armlaunch = qib_set_7322_armlaunch; in qib_init_iba7322_funcs()
7203 dd->f_set_cntr_sample = qib_set_cntr_7322_sample; in qib_init_iba7322_funcs()
7204 dd->f_iblink_state = qib_7322_iblink_state; in qib_init_iba7322_funcs()
7205 dd->f_ibphys_portstate = qib_7322_phys_portstate; in qib_init_iba7322_funcs()
7206 dd->f_get_ib_cfg = qib_7322_get_ib_cfg; in qib_init_iba7322_funcs()
7207 dd->f_set_ib_cfg = qib_7322_set_ib_cfg; in qib_init_iba7322_funcs()
7208 dd->f_set_ib_loopback = qib_7322_set_loopback; in qib_init_iba7322_funcs()
7209 dd->f_get_ib_table = qib_7322_get_ib_table; in qib_init_iba7322_funcs()
7210 dd->f_set_ib_table = qib_7322_set_ib_table; in qib_init_iba7322_funcs()
7211 dd->f_set_intr_state = qib_7322_set_intr_state; in qib_init_iba7322_funcs()
7212 dd->f_setextled = qib_setup_7322_setextled; in qib_init_iba7322_funcs()
7213 dd->f_txchk_change = qib_7322_txchk_change; in qib_init_iba7322_funcs()
7214 dd->f_update_usrhead = qib_update_7322_usrhead; in qib_init_iba7322_funcs()
7215 dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr; in qib_init_iba7322_funcs()
7216 dd->f_xgxs_reset = qib_7322_mini_pcs_reset; in qib_init_iba7322_funcs()
7217 dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up; in qib_init_iba7322_funcs()
7218 dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up; in qib_init_iba7322_funcs()
7219 dd->f_sdma_init_early = qib_7322_sdma_init_early; in qib_init_iba7322_funcs()
7220 dd->f_writescratch = writescratch; in qib_init_iba7322_funcs()
7221 dd->f_tempsense_rd = qib_7322_tempsense_rd; in qib_init_iba7322_funcs()
7223 dd->f_notify_dca = qib_7322_notify_dca; in qib_init_iba7322_funcs()
7226 * Do remaining PCIe setup and save PCIe values in dd. in qib_init_iba7322_funcs()
7231 ret = qib_pcie_ddinit(dd, pdev, ent); in qib_init_iba7322_funcs()
7236 ret = qib_init_7322_variables(dd); in qib_init_iba7322_funcs()
7240 if (qib_mini_init || !dd->num_pports) in qib_init_iba7322_funcs()
7249 tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table); in qib_init_iba7322_funcs()
7252 irq_table[i].port <= dd->num_pports) || in qib_init_iba7322_funcs()
7254 dd->rcd[i - ARRAY_SIZE(irq_table)])) in qib_init_iba7322_funcs()
7258 actual_cnt -= dd->num_pports; in qib_init_iba7322_funcs()
7261 dd->cspec->msix_entries = kcalloc(tabsize, in qib_init_iba7322_funcs()
7264 if (!dd->cspec->msix_entries) in qib_init_iba7322_funcs()
7267 if (qib_pcie_params(dd, 8, &tabsize)) in qib_init_iba7322_funcs()
7268 qib_dev_err(dd, in qib_init_iba7322_funcs()
7271 dd->cspec->num_msix_entries = tabsize; in qib_init_iba7322_funcs()
7274 qib_setup_7322_interrupt(dd, 1); in qib_init_iba7322_funcs()
7277 qib_write_kreg(dd, kr_hwdiagctrl, 0); in qib_init_iba7322_funcs()
7280 qib_devinfo(dd->pcidev, "DCA enabled\n"); in qib_init_iba7322_funcs()
7281 dd->flags |= QIB_DCA_ENABLED; in qib_init_iba7322_funcs()
7282 qib_setup_dca(dd); in qib_init_iba7322_funcs()
7288 qib_pcie_ddcleanup(dd); in qib_init_iba7322_funcs()
7290 qib_free_devdata(dd); in qib_init_iba7322_funcs()
7291 dd = ERR_PTR(ret); in qib_init_iba7322_funcs()
7293 return dd; in qib_init_iba7322_funcs()
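The long run of dd->f_* assignments in qib_init_iba7322_funcs() is the chip-specific ops table: the generic qib core only ever calls through these pointers, so the same upper-layer code can drive different ASICs. A compact userspace sketch of the pattern; struct dev_ops and its members are illustrative, not the driver's actual layout:

    #include <stdio.h>

    /* Hypothetical ops table, analogous to the dd->f_* pointers. */
    struct dev_ops {
        int  (*late_initreg)(void *dd);
        void (*set_intr_state)(void *dd, int enable);
    };

    static int my_late_initreg(void *dd)            { (void)dd; return 0; }
    static void my_set_intr_state(void *dd, int on) { (void)dd; printf("intr %d\n", on); }

    /* Chip-specific init fills in the table; generic code sees only dev_ops. */
    static void init_chip_funcs(struct dev_ops *ops)
    {
        ops->late_initreg   = my_late_initreg;
        ops->set_intr_state = my_set_intr_state;
    }

    int main(void)
    {
        struct dev_ops ops;

        init_chip_funcs(&ops);
        ops.set_intr_state(NULL, 1);     /* generic-core style dispatch */
        return ops.late_initreg(NULL);
    }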
7317 struct qib_devdata *dd = ppd->dd; in set_txdds() local
7329 regidx += (dd->palign / sizeof(u64)); in set_txdds()
7335 qib_write_kreg(dd, regidx, pack_ent); in set_txdds()
7337 qib_write_kreg(ppd->dd, kr_scratch, 0); in set_txdds()
7610 *sdr_dds = txdds_sdr + ppd->dd->board_atten; in find_best_ent()
7611 *ddr_dds = txdds_ddr + ppd->dd->board_atten; in find_best_ent()
7612 *qdr_dds = txdds_qdr + ppd->dd->board_atten; in find_best_ent()
7639 } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) && in find_best_ent()
7644 ppd->dd->unit, ppd->port, idx); in find_best_ent()
7666 if (!(ppd->dd->flags & QIB_HAS_QSFP) || override) in init_txdds_table()
7705 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr, in ahb_mod() argument
7713 prev_acc = qib_read_kreg64(dd, KR_AHB_ACC); in ahb_mod()
7716 qib_write_kreg(dd, KR_AHB_ACC, acc); in ahb_mod()
7719 trans = qib_read_kreg64(dd, KR_AHB_TRANS); in ahb_mod()
7724 qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES); in ahb_mod()
7735 qib_write_kreg(dd, KR_AHB_TRANS, trans); in ahb_mod()
7738 trans = qib_read_kreg64(dd, KR_AHB_TRANS); in ahb_mod()
7743 qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n", in ahb_mod()
7748 trans = qib_read_kreg64(dd, KR_AHB_TRANS); in ahb_mod()
7758 qib_write_kreg(dd, KR_AHB_TRANS, trans); in ahb_mod()
7761 trans = qib_read_kreg64(dd, KR_AHB_TRANS); in ahb_mod()
7766 qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n", in ahb_mod()
7773 qib_write_kreg(dd, KR_AHB_ACC, prev_acc); in ahb_mod()
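ahb_mod() polls the AHB transaction register for a ready bit before each phase, giving up with a "No ahb_rdy in %d tries" error once AHB_TRANS_TRIES attempts are exhausted. A minimal sketch of bounded ready-polling under assumed names (RDY_BIT, read_status, MAX_TRIES):

    #include <stdio.h>

    #define RDY_BIT   (1u << 31)
    #define MAX_TRIES 10

    static unsigned fake_status = RDY_BIT;   /* pretend the hardware is ready */

    static unsigned read_status(void) { return fake_status; }

    /* Poll a status register for the ready bit, giving up after MAX_TRIES. */
    static int poll_ready(void)
    {
        for (int tries = 0; tries < MAX_TRIES; tries++) {
            if (read_status() & RDY_BIT)
                return 0;
            /* the real driver delays between polls */
        }
        fprintf(stderr, "No ahb_rdy in %d tries\n", MAX_TRIES);
        return -1;
    }

    int main(void)
    {
        return poll_ready() ? 1 : 0;
    }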
7780 struct qib_devdata *dd = ppd->dd; in ibsd_wr_allchans() local
7784 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, in ibsd_wr_allchans()
7786 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, in ibsd_wr_allchans()
7798 ppd->dd->unit, ppd->port); in serdes_7322_los_enable()
7802 ppd->dd->unit, ppd->port); in serdes_7322_los_enable()
7812 if (ppd->dd->cspec->r1) in serdes_7322_init()
7844 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; in serdes_7322_init_old()
7848 le_val = IS_QME(ppd->dd) ? 0 : 1; in serdes_7322_init_old()
7852 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); in serdes_7322_init_old()
7859 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); in serdes_7322_init_old()
7860 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); in serdes_7322_init_old()
7861 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); in serdes_7322_init_old()
7862 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); in serdes_7322_init_old()
7865 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); in serdes_7322_init_old()
7866 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); in serdes_7322_init_old()
7867 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); in serdes_7322_init_old()
7868 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); in serdes_7322_init_old()
7871 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); in serdes_7322_init_old()
7887 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; in serdes_7322_init_old()
7897 ppd->dd->cspec->r1 ? in serdes_7322_init_old()
7907 if (!ppd->dd->cspec->r1) { in serdes_7322_init_old()
7925 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); in serdes_7322_init_new()
7967 if (!ppd->dd->cspec->r1) { in serdes_7322_init_new()
7993 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); in serdes_7322_init_new()
7994 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); in serdes_7322_init_new()
7995 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); in serdes_7322_init_new()
7996 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); in serdes_7322_init_new()
7999 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); in serdes_7322_init_new()
8000 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); in serdes_7322_init_new()
8001 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); in serdes_7322_init_new()
8002 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); in serdes_7322_init_new()
8005 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); in serdes_7322_init_new()
8029 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), in serdes_7322_init_new()
8042 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), in serdes_7322_init_new()
8057 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; in serdes_7322_init_new()
8067 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; in serdes_7322_init_new()
8088 ppd->dd->cspec->r1 ? in serdes_7322_init_new()
8118 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in set_man_code()
8126 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in set_man_mode_h1()
8129 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in set_man_mode_h1()
8136 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8138 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8140 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8142 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), in clock_man()
8203 if (!ppd->dd->cspec->r1) in force_h1()
8225 static int qib_r_grab(struct qib_devdata *dd) in qib_r_grab() argument
8229 qib_write_kreg(dd, kr_r_access, val); in qib_r_grab()
8230 qib_read_kreg32(dd, kr_scratch); in qib_r_grab()
8237 static int qib_r_wait_for_rdy(struct qib_devdata *dd) in qib_r_wait_for_rdy() argument
8243 val = qib_read_kreg32(dd, kr_r_access); in qib_r_wait_for_rdy()
8250 static int qib_r_shift(struct qib_devdata *dd, int bisten, in qib_r_shift() argument
8258 ret = qib_r_wait_for_rdy(dd); in qib_r_shift()
8272 qib_write_kreg(dd, kr_r_access, val); in qib_r_shift()
8273 qib_read_kreg32(dd, kr_scratch); in qib_r_shift()
8274 ret = qib_r_wait_for_rdy(dd); in qib_r_shift()
8280 qib_write_kreg(dd, kr_r_access, val); in qib_r_shift()
8281 qib_read_kreg32(dd, kr_scratch); in qib_r_shift()
8282 ret = qib_r_wait_for_rdy(dd); in qib_r_shift()
8290 static int qib_r_update(struct qib_devdata *dd, int bisten) in qib_r_update() argument
8296 ret = qib_r_wait_for_rdy(dd); in qib_r_update()
8298 qib_write_kreg(dd, kr_r_access, val); in qib_r_update()
8299 qib_read_kreg32(dd, kr_scratch); in qib_r_update()
8402 struct qib_devdata *dd = ppd->dd; in setup_7322_link_recovery() local
8404 if (!ppd->dd->cspec->r1) in setup_7322_link_recovery()
8407 dd->cspec->recovery_ports_initted++; in setup_7322_link_recovery()
8410 if (!both && dd->cspec->recovery_ports_initted == 1) { in setup_7322_link_recovery()
8418 if (qib_r_grab(dd) < 0 || in setup_7322_link_recovery()
8419 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 || in setup_7322_link_recovery()
8420 qib_r_update(dd, BISTEN_ETM) < 0 || in setup_7322_link_recovery()
8421 qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 || in setup_7322_link_recovery()
8422 qib_r_update(dd, BISTEN_AT) < 0 || in setup_7322_link_recovery()
8423 qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL, in setup_7322_link_recovery()
8425 qib_r_update(dd, BISTEN_PORT_SEL) < 0 || in setup_7322_link_recovery()
8426 qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 || in setup_7322_link_recovery()
8427 qib_r_update(dd, BISTEN_AT) < 0 || in setup_7322_link_recovery()
8428 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 || in setup_7322_link_recovery()
8429 qib_r_update(dd, BISTEN_ETM) < 0) in setup_7322_link_recovery()
8430 qib_dev_err(dd, "Failed IB link recovery setup\n"); in setup_7322_link_recovery()
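setup_7322_link_recovery() programs the recovery scan chains as one long short-circuit chain of helpers, so the first failing qib_r_* call skips the rest and a single qib_dev_err() covers the whole sequence. A trivial sketch of that error-chaining style with placeholder steps:

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return 0; }

    int main(void)
    {
        /* Each step returns <0 on failure; the || chain stops at the first
         * failure and one diagnostic covers the whole sequence. */
        if (step_a() < 0 ||
            step_b() < 0 ||
            step_c() < 0) {
            fprintf(stderr, "Failed IB link recovery setup\n");
            return 1;
        }
        return 0;
    }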
8435 struct qib_devdata *dd = ppd->dd; in check_7322_rxe_status() local
8438 if (dd->cspec->recovery_ports_initted != 1) in check_7322_rxe_status()
8440 qib_write_kreg(dd, kr_control, dd->control | in check_7322_rxe_status()
8442 (void)qib_read_kreg64(dd, kr_scratch); in check_7322_rxe_status()
8444 fmask = qib_read_kreg64(dd, kr_act_fmask); in check_7322_rxe_status()
8451 ppd->dd->cspec->stay_in_freeze = 1; in check_7322_rxe_status()
8452 qib_7322_set_intr_state(ppd->dd, 0); in check_7322_rxe_status()
8453 qib_write_kreg(dd, kr_fmask, 0ULL); in check_7322_rxe_status()
8454 qib_dev_err(dd, "HCA unusable until powercycled\n"); in check_7322_rxe_status()
8458 qib_write_kreg(ppd->dd, kr_hwerrclear, in check_7322_rxe_status()
8462 qib_write_kreg(dd, kr_control, dd->control); in check_7322_rxe_status()
8463 qib_read_kreg32(dd, kr_scratch); in check_7322_rxe_status()
8470 qib_read_kreg32(dd, kr_scratch); in check_7322_rxe_status()