Lines Matching refs: dd

101 #define emulator_rev(dd) ((dd)->irev >> 8)  argument
103 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3) argument
104 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4) argument
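The three macros above decode dd->irev, the IC revision reported by the 8051 firmware: everything above the low byte is the emulator revision, and the low nibble distinguishes P-chip (3) from S-chip (4) emulation. A minimal sketch of a caller branching on them; log_emulation_info() is a hypothetical helper, not part of the driver:

        /* Hypothetical caller of the emulator macros shown above. */
        static void log_emulation_info(struct hfi1_devdata *dd)
        {
                if (is_emulator_p(dd) || is_emulator_s(dd))
                        dd_dev_info(dd, "running on %c-chip emulator, rev %u\n",
                                    is_emulator_p(dd) ? 'P' : 'S',
                                    (u32)emulator_rev(dd));
        }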
1003 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1004 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1005 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1007 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1009 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1011 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1013 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1015 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1016 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1019 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1021 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1022 static void handle_dcc_err(struct hfi1_devdata *dd,
1024 static void handle_lcb_err(struct hfi1_devdata *dd,
1026 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1027 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1028 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1029 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1030 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1031 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1032 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1033 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1038 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1040 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1041 static int thermal_init(struct hfi1_devdata *dd);
1054 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1055 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1056 static void handle_temp_err(struct hfi1_devdata *dd);
1057 static void dc_shutdown(struct hfi1_devdata *dd);
1058 static void dc_start(struct hfi1_devdata *dd);
1062 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1063 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1076 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
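The handler pointer above shares its signature with every handle_*() prototype listed earlier, which suggests a table of per-block error-register descriptors. A sketch of that pattern; the struct layout, field names, and run_err_handler() are assumptions modeled on those prototypes:

        /* Assumed descriptor shape for a per-block error register. */
        struct err_reg_info {
                u32 status;     /* error status CSR offset */
                u32 clear;      /* error clear CSR offset */
                u32 mask;       /* error mask CSR offset */
                void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
                const char *desc;
        };

        /* Hypothetical dispatch: read-and-clear the status CSR, then
         * hand the set bits to the block handler (handle_cce_err() etc.). */
        static void run_err_handler(struct hfi1_devdata *dd,
                                    const struct err_reg_info *eri, u32 source)
        {
                u64 reg = read_csr(dd, eri->status);

                write_csr(dd, eri->clear, reg); /* acknowledge */
                if (reg && eri->handler)
                        eri->handler(dd, source, reg);
        }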
1292 const struct hfi1_devdata *dd, in hfi1_addr_from_offset() argument
1295 if (offset >= dd->base2_start) in hfi1_addr_from_offset()
1296 return dd->kregbase2 + (offset - dd->base2_start); in hfi1_addr_from_offset()
1297 return dd->kregbase1 + offset; in hfi1_addr_from_offset()
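The fragments above are enough to reconstruct the helper: CSR offsets below dd->base2_start fall in the first kernel mapping, everything at or above it in the second. A sketch:

        /* Reconstructed from the fragments above: pick the right
         * mapped region for a CSR offset. */
        static inline void __iomem *hfi1_addr_from_offset(
                                        const struct hfi1_devdata *dd,
                                        u32 offset)
        {
                if (offset >= dd->base2_start)
                        return dd->kregbase2 + (offset - dd->base2_start);
                return dd->kregbase1 + offset;
        }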
1308 u64 read_csr(const struct hfi1_devdata *dd, u32 offset) in read_csr() argument
1310 if (dd->flags & HFI1_PRESENT) in read_csr()
1311 return readq(hfi1_addr_from_offset(dd, offset)); in read_csr()
1321 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value) in write_csr() argument
1323 if (dd->flags & HFI1_PRESENT) { in write_csr()
1324 void __iomem *base = hfi1_addr_from_offset(dd, offset); in write_csr()
1327 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start)) in write_csr()
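Both CSR accessors gate the MMIO operation on HFI1_PRESENT so a vanished device degrades to a no-op read/write instead of a bus fault. A sketch completing them from the fragments; the all-ones return for an absent device is an assumption:

        u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
        {
                if (dd->flags & HFI1_PRESENT)
                        return readq(hfi1_addr_from_offset(dd, offset));
                return -1;      /* assumed: device gone, read as all ones */
        }

        void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
        {
                if (dd->flags & HFI1_PRESENT) {
                        void __iomem *base = hfi1_addr_from_offset(dd, offset);

                        /* RcvArray offsets need their own write path */
                        if (WARN_ON(offset >= RCV_ARRAY &&
                                    offset < dd->base2_start))
                                return;
                        writeq(value, base);
                }
        }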
1342 const struct hfi1_devdata *dd, in get_csr_addr() argument
1345 if (dd->flags & HFI1_PRESENT) in get_csr_addr()
1346 return hfi1_addr_from_offset(dd, offset); in get_csr_addr()
1350 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr, in read_write_csr() argument
1356 ret = read_csr(dd, csr); in read_write_csr()
1358 write_csr(dd, csr, value); in read_write_csr()
1361 dd_dev_err(dd, "Invalid cntr register access mode"); in read_write_csr()
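read_write_csr() is the mode-dispatch core used by the counter accessors below. A sketch completing it from the fragments; CNTR_MODE_R/CNTR_MODE_W are the usual hfi1 counter access modes and are assumed here:

        static inline u64 read_write_csr(const struct hfi1_devdata *dd,
                                         u32 csr, int mode, u64 value)
        {
                u64 ret;

                if (mode == CNTR_MODE_R) {
                        ret = read_csr(dd, csr);
                } else if (mode == CNTR_MODE_W) {
                        write_csr(dd, csr, value);
                        ret = value;
                } else {
                        dd_dev_err(dd, "Invalid cntr register access mode");
                        return 0;
                }
                return ret;
        }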
1373 struct hfi1_devdata *dd = context; in dev_access_u32_csr() local
1384 return read_write_csr(dd, csr, mode, data); in dev_access_u32_csr()
1390 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sde_err_cnt() local
1392 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_err_cnt()
1393 return dd->per_sdma[idx].err_cnt; in access_sde_err_cnt()
1400 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sde_int_cnt() local
1402 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_int_cnt()
1403 return dd->per_sdma[idx].sdma_int_cnt; in access_sde_int_cnt()
1410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sde_idle_int_cnt() local
1412 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_idle_int_cnt()
1413 return dd->per_sdma[idx].idle_int_cnt; in access_sde_idle_int_cnt()
1421 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sde_progress_int_cnt() local
1423 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_progress_int_cnt()
1424 return dd->per_sdma[idx].progress_int_cnt; in access_sde_progress_int_cnt()
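The four access_sde_*_cnt() accessors above share one shape: validate the engine index against the allocated per_sdma array, then return a single per-engine counter. A sketch of the first one, with the cntr_entry signature assumed from the surrounding accessors:

        static u64 access_sde_err_cnt(const struct cntr_entry *entry,
                                      void *context, int idx, int mode,
                                      u64 data)
        {
                struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

                /* guard: engines may not be allocated yet */
                if (dd->per_sdma && idx < dd->num_sdma)
                        return dd->per_sdma[idx].err_cnt;
                return 0;
        }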
1431 struct hfi1_devdata *dd = context; in dev_access_u64_csr() local
1445 val = read_write_csr(dd, csr, mode, data); in dev_access_u64_csr()
1452 struct hfi1_devdata *dd = context; in dc_access_lcb_cntr() local
1459 ret = read_lcb_csr(dd, csr, &data); in dc_access_lcb_cntr()
1461 ret = write_lcb_csr(dd, csr, data); in dc_access_lcb_cntr()
1464 if (!(dd->flags & HFI1_SHUTDOWN)) in dc_access_lcb_cntr()
1465 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr); in dc_access_lcb_cntr()
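LCB counters sit behind the DC, so the read/write can fail while the link is down; the accessor reports that only when the device is not already shutting down. A reconstruction from the fragments above, with the vl check and return-value plumbing assumed:

        static u64 dc_access_lcb_cntr(const struct cntr_entry *entry,
                                      void *context, int vl, int mode,
                                      u64 data)
        {
                struct hfi1_devdata *dd = context;
                u32 csr = entry->csr;
                int ret = 0;

                if (mode == CNTR_MODE_R)
                        ret = read_lcb_csr(dd, csr, &data);
                else if (mode == CNTR_MODE_W)
                        ret = write_lcb_csr(dd, csr, data);

                if (ret) {
                        /* expected while the DC/link is unavailable */
                        if (!(dd->flags & HFI1_SHUTDOWN))
                                dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
                        return 0;
                }
                return data;
        }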
1481 return read_write_csr(ppd->dd, entry->csr, mode, data); in port_access_u32_csr()
1499 val = read_write_csr(ppd->dd, csr, mode, data); in port_access_u64_csr()
1504 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode, in read_write_sw() argument
1515 dd_dev_err(dd, "Invalid cntr sw access mode"); in read_write_sw()
1531 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data); in access_sw_link_dn_cnt()
1541 return read_write_sw(ppd->dd, &ppd->link_up, mode, data); in access_sw_link_up_cnt()
1552 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data); in access_sw_unknown_frame_cnt()
1569 return read_write_sw(ppd->dd, counter, mode, data); in access_sw_xmit_discards()
1581 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors, in access_xmit_constraint_errs()
1593 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors, in access_rcv_constraint_errs()
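All the per-port software counters above funnel through read_write_sw(), the plain-memory analogue of read_write_csr(). A sketch completing it from the fragments:

        static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr,
                                        int mode, u64 data)
        {
                u64 ret;

                if (mode == CNTR_MODE_R) {
                        ret = *cntr;
                } else if (mode == CNTR_MODE_W) {
                        *cntr = data;
                        ret = data;
                } else {
                        dd_dev_err(dd, "Invalid cntr sw access mode");
                        return 0;
                }
                return ret;
        }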
1607 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val, in read_write_cpu() argument
1623 dd_dev_err(dd, "Per CPU cntrs can only be zeroed"); in read_write_cpu()
1625 dd_dev_err(dd, "Invalid cntr sw cpu access mode"); in read_write_cpu()
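Per-CPU counters are read by summing all CPUs and subtracting a zero baseline; a "write" may only record a new baseline (i.e. zero the counter), as the error strings above show. A sketch, assuming a get_all_cpu_total() summing helper; the callers below (access_sw_cpu_intr() etc.) follow:

        static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
                                  u64 __percpu *cntr, int vl, int mode,
                                  u64 data)
        {
                u64 ret = 0;

                if (vl != CNTR_INVALID_VL)
                        return 0;

                if (mode == CNTR_MODE_R) {
                        ret = get_all_cpu_total(cntr) - *z_val;
                } else if (mode == CNTR_MODE_W) {
                        /* a write can only zero the counter */
                        if (data == 0)
                                *z_val = get_all_cpu_total(cntr);
                        else
                                dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
                } else {
                        dd_dev_err(dd, "Invalid cntr sw cpu access mode");
                        return 0;
                }
                return ret;
        }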
1635 struct hfi1_devdata *dd = context; in access_sw_cpu_intr() local
1637 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl, in access_sw_cpu_intr()
1644 struct hfi1_devdata *dd = context; in access_sw_cpu_rcv_limit() local
1646 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl, in access_sw_cpu_rcv_limit()
1653 struct hfi1_devdata *dd = context; in access_sw_pio_wait() local
1655 return dd->verbs_dev.n_piowait; in access_sw_pio_wait()
1661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sw_pio_drain() local
1663 return dd->verbs_dev.n_piodrain; in access_sw_pio_drain()
1669 struct hfi1_devdata *dd = context; in access_sw_ctx0_seq_drop() local
1671 return dd->ctx0_seq_drop; in access_sw_ctx0_seq_drop()
1677 struct hfi1_devdata *dd = context; in access_sw_vtx_wait() local
1679 return dd->verbs_dev.n_txwait; in access_sw_vtx_wait()
1685 struct hfi1_devdata *dd = context; in access_sw_kmem_wait() local
1687 return dd->verbs_dev.n_kmem_wait; in access_sw_kmem_wait()
1693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sw_send_schedule() local
1695 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl, in access_sw_send_schedule()
1704 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_pll_lock_fail_err_cnt() local
1706 return dd->misc_err_status_cnt[12]; in access_misc_pll_lock_fail_err_cnt()
1713 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_mbist_fail_err_cnt() local
1715 return dd->misc_err_status_cnt[11]; in access_misc_mbist_fail_err_cnt()
1722 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_invalid_eep_cmd_err_cnt() local
1724 return dd->misc_err_status_cnt[10]; in access_misc_invalid_eep_cmd_err_cnt()
1731 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_efuse_done_parity_err_cnt() local
1733 return dd->misc_err_status_cnt[9]; in access_misc_efuse_done_parity_err_cnt()
1740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_efuse_write_err_cnt() local
1742 return dd->misc_err_status_cnt[8]; in access_misc_efuse_write_err_cnt()
1749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_efuse_read_bad_addr_err_cnt() local
1751 return dd->misc_err_status_cnt[7]; in access_misc_efuse_read_bad_addr_err_cnt()
1758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_efuse_csr_parity_err_cnt() local
1760 return dd->misc_err_status_cnt[6]; in access_misc_efuse_csr_parity_err_cnt()
1767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_fw_auth_failed_err_cnt() local
1769 return dd->misc_err_status_cnt[5]; in access_misc_fw_auth_failed_err_cnt()
1776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_key_mismatch_err_cnt() local
1778 return dd->misc_err_status_cnt[4]; in access_misc_key_mismatch_err_cnt()
1785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_sbus_write_failed_err_cnt() local
1787 return dd->misc_err_status_cnt[3]; in access_misc_sbus_write_failed_err_cnt()
1794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_csr_write_bad_addr_err_cnt() local
1796 return dd->misc_err_status_cnt[2]; in access_misc_csr_write_bad_addr_err_cnt()
1803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_csr_read_bad_addr_err_cnt() local
1805 return dd->misc_err_status_cnt[1]; in access_misc_csr_read_bad_addr_err_cnt()
1812 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_csr_parity_err_cnt() local
1814 return dd->misc_err_status_cnt[0]; in access_misc_csr_parity_err_cnt()
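From here down, every access_*_err_cnt() accessor follows one template: cast the opaque context back to the device data and return one fixed slot of a per-block error-status array (misc, CCE, receive, PIO, SDMA, egress, send). A representative sketch; only the array and index differ per counter:

        static u64 access_misc_csr_parity_err_cnt(
                                const struct cntr_entry *entry,
                                void *context, int vl, int mode, u64 data)
        {
                struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

                return dd->misc_err_status_cnt[0];
        }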
1825 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sw_cce_err_status_aggregated_cnt() local
1827 return dd->sw_cce_err_status_aggregate; in access_sw_cce_err_status_aggregated_cnt()
1838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_msix_csr_parity_err_cnt() local
1840 return dd->cce_err_status_cnt[40]; in access_cce_msix_csr_parity_err_cnt()
1847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_int_map_unc_err_cnt() local
1849 return dd->cce_err_status_cnt[39]; in access_cce_int_map_unc_err_cnt()
1856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_int_map_cor_err_cnt() local
1858 return dd->cce_err_status_cnt[38]; in access_cce_int_map_cor_err_cnt()
1865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_msix_table_unc_err_cnt() local
1867 return dd->cce_err_status_cnt[37]; in access_cce_msix_table_unc_err_cnt()
1874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_msix_table_cor_err_cnt() local
1876 return dd->cce_err_status_cnt[36]; in access_cce_msix_table_cor_err_cnt()
1883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_rxdma_conv_fifo_parity_err_cnt() local
1885 return dd->cce_err_status_cnt[35]; in access_cce_rxdma_conv_fifo_parity_err_cnt()
1892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_rcpl_async_fifo_parity_err_cnt() local
1894 return dd->cce_err_status_cnt[34]; in access_cce_rcpl_async_fifo_parity_err_cnt()
1901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_seg_write_bad_addr_err_cnt() local
1903 return dd->cce_err_status_cnt[33]; in access_cce_seg_write_bad_addr_err_cnt()
1910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_seg_read_bad_addr_err_cnt() local
1912 return dd->cce_err_status_cnt[32]; in access_cce_seg_read_bad_addr_err_cnt()
1918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_la_triggered_cnt() local
1920 return dd->cce_err_status_cnt[31]; in access_la_triggered_cnt()
1927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_trgt_cpl_timeout_err_cnt() local
1929 return dd->cce_err_status_cnt[30]; in access_cce_trgt_cpl_timeout_err_cnt()
1936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_receive_parity_err_cnt() local
1938 return dd->cce_err_status_cnt[29]; in access_pcic_receive_parity_err_cnt()
1945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_transmit_back_parity_err_cnt() local
1947 return dd->cce_err_status_cnt[28]; in access_pcic_transmit_back_parity_err_cnt()
1954 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_transmit_front_parity_err_cnt() local
1956 return dd->cce_err_status_cnt[27]; in access_pcic_transmit_front_parity_err_cnt()
1963 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_cpl_dat_q_unc_err_cnt() local
1965 return dd->cce_err_status_cnt[26]; in access_pcic_cpl_dat_q_unc_err_cnt()
1972 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_cpl_hd_q_unc_err_cnt() local
1974 return dd->cce_err_status_cnt[25]; in access_pcic_cpl_hd_q_unc_err_cnt()
1981 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_post_dat_q_unc_err_cnt() local
1983 return dd->cce_err_status_cnt[24]; in access_pcic_post_dat_q_unc_err_cnt()
1990 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_post_hd_q_unc_err_cnt() local
1992 return dd->cce_err_status_cnt[23]; in access_pcic_post_hd_q_unc_err_cnt()
1999 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_retry_sot_mem_unc_err_cnt() local
2001 return dd->cce_err_status_cnt[22]; in access_pcic_retry_sot_mem_unc_err_cnt()
2008 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_retry_mem_unc_err() local
2010 return dd->cce_err_status_cnt[21]; in access_pcic_retry_mem_unc_err()
2017 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_n_post_dat_q_parity_err_cnt() local
2019 return dd->cce_err_status_cnt[20]; in access_pcic_n_post_dat_q_parity_err_cnt()
2026 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_n_post_h_q_parity_err_cnt() local
2028 return dd->cce_err_status_cnt[19]; in access_pcic_n_post_h_q_parity_err_cnt()
2035 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_cpl_dat_q_cor_err_cnt() local
2037 return dd->cce_err_status_cnt[18]; in access_pcic_cpl_dat_q_cor_err_cnt()
2044 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_cpl_hd_q_cor_err_cnt() local
2046 return dd->cce_err_status_cnt[17]; in access_pcic_cpl_hd_q_cor_err_cnt()
2053 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_post_dat_q_cor_err_cnt() local
2055 return dd->cce_err_status_cnt[16]; in access_pcic_post_dat_q_cor_err_cnt()
2062 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_post_hd_q_cor_err_cnt() local
2064 return dd->cce_err_status_cnt[15]; in access_pcic_post_hd_q_cor_err_cnt()
2071 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_retry_sot_mem_cor_err_cnt() local
2073 return dd->cce_err_status_cnt[14]; in access_pcic_retry_sot_mem_cor_err_cnt()
2080 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_retry_mem_cor_err_cnt() local
2082 return dd->cce_err_status_cnt[13]; in access_pcic_retry_mem_cor_err_cnt()
2089 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cli1_async_fifo_dbg_parity_err_cnt() local
2091 return dd->cce_err_status_cnt[12]; in access_cce_cli1_async_fifo_dbg_parity_err_cnt()
2098 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cli1_async_fifo_rxdma_parity_err_cnt() local
2100 return dd->cce_err_status_cnt[11]; in access_cce_cli1_async_fifo_rxdma_parity_err_cnt()
2107 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt() local
2109 return dd->cce_err_status_cnt[10]; in access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt()
2116 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt() local
2118 return dd->cce_err_status_cnt[9]; in access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt()
2125 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cli2_async_fifo_parity_err_cnt() local
2127 return dd->cce_err_status_cnt[8]; in access_cce_cli2_async_fifo_parity_err_cnt()
2134 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_csr_cfg_bus_parity_err_cnt() local
2136 return dd->cce_err_status_cnt[7]; in access_cce_csr_cfg_bus_parity_err_cnt()
2143 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cli0_async_fifo_parity_err_cnt() local
2145 return dd->cce_err_status_cnt[6]; in access_cce_cli0_async_fifo_parity_err_cnt()
2152 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_rspd_data_parity_err_cnt() local
2154 return dd->cce_err_status_cnt[5]; in access_cce_rspd_data_parity_err_cnt()
2161 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_trgt_access_err_cnt() local
2163 return dd->cce_err_status_cnt[4]; in access_cce_trgt_access_err_cnt()
2170 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_trgt_async_fifo_parity_err_cnt() local
2172 return dd->cce_err_status_cnt[3]; in access_cce_trgt_async_fifo_parity_err_cnt()
2179 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_csr_write_bad_addr_err_cnt() local
2181 return dd->cce_err_status_cnt[2]; in access_cce_csr_write_bad_addr_err_cnt()
2188 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_csr_read_bad_addr_err_cnt() local
2190 return dd->cce_err_status_cnt[1]; in access_cce_csr_read_bad_addr_err_cnt()
2197 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_ccs_csr_parity_err_cnt() local
2199 return dd->cce_err_status_cnt[0]; in access_ccs_csr_parity_err_cnt()
2210 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_csr_parity_err_cnt() local
2212 return dd->rcv_err_status_cnt[63]; in access_rx_csr_parity_err_cnt()
2219 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_csr_write_bad_addr_err_cnt() local
2221 return dd->rcv_err_status_cnt[62]; in access_rx_csr_write_bad_addr_err_cnt()
2228 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_csr_read_bad_addr_err_cnt() local
2230 return dd->rcv_err_status_cnt[61]; in access_rx_csr_read_bad_addr_err_cnt()
2237 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_csr_unc_err_cnt() local
2239 return dd->rcv_err_status_cnt[60]; in access_rx_dma_csr_unc_err_cnt()
2246 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_dq_fsm_encoding_err_cnt() local
2248 return dd->rcv_err_status_cnt[59]; in access_rx_dma_dq_fsm_encoding_err_cnt()
2255 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_eq_fsm_encoding_err_cnt() local
2257 return dd->rcv_err_status_cnt[58]; in access_rx_dma_eq_fsm_encoding_err_cnt()
2264 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_csr_parity_err_cnt() local
2266 return dd->rcv_err_status_cnt[57]; in access_rx_dma_csr_parity_err_cnt()
2273 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_data_cor_err_cnt() local
2275 return dd->rcv_err_status_cnt[56]; in access_rx_rbuf_data_cor_err_cnt()
2282 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_data_unc_err_cnt() local
2284 return dd->rcv_err_status_cnt[55]; in access_rx_rbuf_data_unc_err_cnt()
2291 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_data_fifo_rd_cor_err_cnt() local
2293 return dd->rcv_err_status_cnt[54]; in access_rx_dma_data_fifo_rd_cor_err_cnt()
2300 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_data_fifo_rd_unc_err_cnt() local
2302 return dd->rcv_err_status_cnt[53]; in access_rx_dma_data_fifo_rd_unc_err_cnt()
2309 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_hdr_fifo_rd_cor_err_cnt() local
2311 return dd->rcv_err_status_cnt[52]; in access_rx_dma_hdr_fifo_rd_cor_err_cnt()
2318 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_hdr_fifo_rd_unc_err_cnt() local
2320 return dd->rcv_err_status_cnt[51]; in access_rx_dma_hdr_fifo_rd_unc_err_cnt()
2327 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_desc_part2_cor_err_cnt() local
2329 return dd->rcv_err_status_cnt[50]; in access_rx_rbuf_desc_part2_cor_err_cnt()
2336 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_desc_part2_unc_err_cnt() local
2338 return dd->rcv_err_status_cnt[49]; in access_rx_rbuf_desc_part2_unc_err_cnt()
2345 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_desc_part1_cor_err_cnt() local
2347 return dd->rcv_err_status_cnt[48]; in access_rx_rbuf_desc_part1_cor_err_cnt()
2354 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_desc_part1_unc_err_cnt() local
2356 return dd->rcv_err_status_cnt[47]; in access_rx_rbuf_desc_part1_unc_err_cnt()
2363 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_hq_intr_fsm_err_cnt() local
2365 return dd->rcv_err_status_cnt[46]; in access_rx_hq_intr_fsm_err_cnt()
2372 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_hq_intr_csr_parity_err_cnt() local
2374 return dd->rcv_err_status_cnt[45]; in access_rx_hq_intr_csr_parity_err_cnt()
2381 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_csr_parity_err_cnt() local
2383 return dd->rcv_err_status_cnt[44]; in access_rx_lookup_csr_parity_err_cnt()
2390 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_rcv_array_cor_err_cnt() local
2392 return dd->rcv_err_status_cnt[43]; in access_rx_lookup_rcv_array_cor_err_cnt()
2399 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_rcv_array_unc_err_cnt() local
2401 return dd->rcv_err_status_cnt[42]; in access_rx_lookup_rcv_array_unc_err_cnt()
2408 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_des_part2_parity_err_cnt() local
2410 return dd->rcv_err_status_cnt[41]; in access_rx_lookup_des_part2_parity_err_cnt()
2417 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_des_part1_unc_cor_err_cnt() local
2419 return dd->rcv_err_status_cnt[40]; in access_rx_lookup_des_part1_unc_cor_err_cnt()
2426 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_des_part1_unc_err_cnt() local
2428 return dd->rcv_err_status_cnt[39]; in access_rx_lookup_des_part1_unc_err_cnt()
2435 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_next_free_buf_cor_err_cnt() local
2437 return dd->rcv_err_status_cnt[38]; in access_rx_rbuf_next_free_buf_cor_err_cnt()
2444 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_next_free_buf_unc_err_cnt() local
2446 return dd->rcv_err_status_cnt[37]; in access_rx_rbuf_next_free_buf_unc_err_cnt()
2453 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rbuf_fl_init_wr_addr_parity_err_cnt() local
2455 return dd->rcv_err_status_cnt[36]; in access_rbuf_fl_init_wr_addr_parity_err_cnt()
2462 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_fl_initdone_parity_err_cnt() local
2464 return dd->rcv_err_status_cnt[35]; in access_rx_rbuf_fl_initdone_parity_err_cnt()
2471 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_fl_write_addr_parity_err_cnt() local
2473 return dd->rcv_err_status_cnt[34]; in access_rx_rbuf_fl_write_addr_parity_err_cnt()
2480 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_fl_rd_addr_parity_err_cnt() local
2482 return dd->rcv_err_status_cnt[33]; in access_rx_rbuf_fl_rd_addr_parity_err_cnt()
2489 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_empty_err_cnt() local
2491 return dd->rcv_err_status_cnt[32]; in access_rx_rbuf_empty_err_cnt()
2498 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_full_err_cnt() local
2500 return dd->rcv_err_status_cnt[31]; in access_rx_rbuf_full_err_cnt()
2507 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rbuf_bad_lookup_err_cnt() local
2509 return dd->rcv_err_status_cnt[30]; in access_rbuf_bad_lookup_err_cnt()
2516 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rbuf_ctx_id_parity_err_cnt() local
2518 return dd->rcv_err_status_cnt[29]; in access_rbuf_ctx_id_parity_err_cnt()
2525 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rbuf_csr_qeopdw_parity_err_cnt() local
2527 return dd->rcv_err_status_cnt[28]; in access_rbuf_csr_qeopdw_parity_err_cnt()
2534 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt() local
2536 return dd->rcv_err_status_cnt[27]; in access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt()
2543 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt() local
2545 return dd->rcv_err_status_cnt[26]; in access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt()
2552 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt() local
2554 return dd->rcv_err_status_cnt[25]; in access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt()
2561 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_vld_bit_parity_err_cnt() local
2563 return dd->rcv_err_status_cnt[24]; in access_rx_rbuf_csr_q_vld_bit_parity_err_cnt()
2570 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_next_buf_parity_err_cnt() local
2572 return dd->rcv_err_status_cnt[23]; in access_rx_rbuf_csr_q_next_buf_parity_err_cnt()
2579 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt() local
2581 return dd->rcv_err_status_cnt[22]; in access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt()
2588 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt() local
2590 return dd->rcv_err_status_cnt[21]; in access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt()
2597 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_block_list_read_cor_err_cnt() local
2599 return dd->rcv_err_status_cnt[20]; in access_rx_rbuf_block_list_read_cor_err_cnt()
2606 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_block_list_read_unc_err_cnt() local
2608 return dd->rcv_err_status_cnt[19]; in access_rx_rbuf_block_list_read_unc_err_cnt()
2615 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_lookup_des_cor_err_cnt() local
2617 return dd->rcv_err_status_cnt[18]; in access_rx_rbuf_lookup_des_cor_err_cnt()
2624 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_lookup_des_unc_err_cnt() local
2626 return dd->rcv_err_status_cnt[17]; in access_rx_rbuf_lookup_des_unc_err_cnt()
2633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt() local
2635 return dd->rcv_err_status_cnt[16]; in access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt()
2642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_lookup_des_reg_unc_err_cnt() local
2644 return dd->rcv_err_status_cnt[15]; in access_rx_rbuf_lookup_des_reg_unc_err_cnt()
2651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_free_list_cor_err_cnt() local
2653 return dd->rcv_err_status_cnt[14]; in access_rx_rbuf_free_list_cor_err_cnt()
2660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_free_list_unc_err_cnt() local
2662 return dd->rcv_err_status_cnt[13]; in access_rx_rbuf_free_list_unc_err_cnt()
2669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_fsm_encoding_err_cnt() local
2671 return dd->rcv_err_status_cnt[12]; in access_rx_rcv_fsm_encoding_err_cnt()
2678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_flag_cor_err_cnt() local
2680 return dd->rcv_err_status_cnt[11]; in access_rx_dma_flag_cor_err_cnt()
2687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_flag_unc_err_cnt() local
2689 return dd->rcv_err_status_cnt[10]; in access_rx_dma_flag_unc_err_cnt()
2696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dc_sop_eop_parity_err_cnt() local
2698 return dd->rcv_err_status_cnt[9]; in access_rx_dc_sop_eop_parity_err_cnt()
2705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_csr_parity_err_cnt() local
2707 return dd->rcv_err_status_cnt[8]; in access_rx_rcv_csr_parity_err_cnt()
2714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_qp_map_table_cor_err_cnt() local
2716 return dd->rcv_err_status_cnt[7]; in access_rx_rcv_qp_map_table_cor_err_cnt()
2723 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_qp_map_table_unc_err_cnt() local
2725 return dd->rcv_err_status_cnt[6]; in access_rx_rcv_qp_map_table_unc_err_cnt()
2732 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_data_cor_err_cnt() local
2734 return dd->rcv_err_status_cnt[5]; in access_rx_rcv_data_cor_err_cnt()
2741 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_data_unc_err_cnt() local
2743 return dd->rcv_err_status_cnt[4]; in access_rx_rcv_data_unc_err_cnt()
2750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_hdr_cor_err_cnt() local
2752 return dd->rcv_err_status_cnt[3]; in access_rx_rcv_hdr_cor_err_cnt()
2759 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_hdr_unc_err_cnt() local
2761 return dd->rcv_err_status_cnt[2]; in access_rx_rcv_hdr_unc_err_cnt()
2768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dc_intf_parity_err_cnt() local
2770 return dd->rcv_err_status_cnt[1]; in access_rx_dc_intf_parity_err_cnt()
2777 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_csr_cor_err_cnt() local
2779 return dd->rcv_err_status_cnt[0]; in access_rx_dma_csr_cor_err_cnt()
2790 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pec_sop_head_parity_err_cnt() local
2792 return dd->send_pio_err_status_cnt[35]; in access_pio_pec_sop_head_parity_err_cnt()
2799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pcc_sop_head_parity_err_cnt() local
2801 return dd->send_pio_err_status_cnt[34]; in access_pio_pcc_sop_head_parity_err_cnt()
2808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_last_returned_cnt_parity_err_cnt() local
2810 return dd->send_pio_err_status_cnt[33]; in access_pio_last_returned_cnt_parity_err_cnt()
2817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_current_free_cnt_parity_err_cnt() local
2819 return dd->send_pio_err_status_cnt[32]; in access_pio_current_free_cnt_parity_err_cnt()
2826 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_reserved_31_err_cnt() local
2828 return dd->send_pio_err_status_cnt[31]; in access_pio_reserved_31_err_cnt()
2835 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_reserved_30_err_cnt() local
2837 return dd->send_pio_err_status_cnt[30]; in access_pio_reserved_30_err_cnt()
2844 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_ppmc_sop_len_err_cnt() local
2846 return dd->send_pio_err_status_cnt[29]; in access_pio_ppmc_sop_len_err_cnt()
2853 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_ppmc_bqc_mem_parity_err_cnt() local
2855 return dd->send_pio_err_status_cnt[28]; in access_pio_ppmc_bqc_mem_parity_err_cnt()
2862 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_vl_fifo_parity_err_cnt() local
2864 return dd->send_pio_err_status_cnt[27]; in access_pio_vl_fifo_parity_err_cnt()
2871 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_vlf_sop_parity_err_cnt() local
2873 return dd->send_pio_err_status_cnt[26]; in access_pio_vlf_sop_parity_err_cnt()
2880 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_vlf_v1_len_parity_err_cnt() local
2882 return dd->send_pio_err_status_cnt[25]; in access_pio_vlf_v1_len_parity_err_cnt()
2889 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_block_qw_count_parity_err_cnt() local
2891 return dd->send_pio_err_status_cnt[24]; in access_pio_block_qw_count_parity_err_cnt()
2898 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_qw_valid_parity_err_cnt() local
2900 return dd->send_pio_err_status_cnt[23]; in access_pio_write_qw_valid_parity_err_cnt()
2907 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_state_machine_err_cnt() local
2909 return dd->send_pio_err_status_cnt[22]; in access_pio_state_machine_err_cnt()
2916 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_data_parity_err_cnt() local
2918 return dd->send_pio_err_status_cnt[21]; in access_pio_write_data_parity_err_cnt()
2925 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_host_addr_mem_cor_err_cnt() local
2927 return dd->send_pio_err_status_cnt[20]; in access_pio_host_addr_mem_cor_err_cnt()
2934 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_host_addr_mem_unc_err_cnt() local
2936 return dd->send_pio_err_status_cnt[19]; in access_pio_host_addr_mem_unc_err_cnt()
2943 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pkt_evict_sm_or_arb_sm_err_cnt() local
2945 return dd->send_pio_err_status_cnt[18]; in access_pio_pkt_evict_sm_or_arb_sm_err_cnt()
2952 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_init_sm_in_err_cnt() local
2954 return dd->send_pio_err_status_cnt[17]; in access_pio_init_sm_in_err_cnt()
2961 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_ppmc_pbl_fifo_err_cnt() local
2963 return dd->send_pio_err_status_cnt[16]; in access_pio_ppmc_pbl_fifo_err_cnt()
2970 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_credit_ret_fifo_parity_err_cnt() local
2972 return dd->send_pio_err_status_cnt[15]; in access_pio_credit_ret_fifo_parity_err_cnt()
2979 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_v1_len_mem_bank1_cor_err_cnt() local
2981 return dd->send_pio_err_status_cnt[14]; in access_pio_v1_len_mem_bank1_cor_err_cnt()
2988 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_v1_len_mem_bank0_cor_err_cnt() local
2990 return dd->send_pio_err_status_cnt[13]; in access_pio_v1_len_mem_bank0_cor_err_cnt()
2997 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_v1_len_mem_bank1_unc_err_cnt() local
2999 return dd->send_pio_err_status_cnt[12]; in access_pio_v1_len_mem_bank1_unc_err_cnt()
3006 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_v1_len_mem_bank0_unc_err_cnt() local
3008 return dd->send_pio_err_status_cnt[11]; in access_pio_v1_len_mem_bank0_unc_err_cnt()
3015 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_sm_pkt_reset_parity_err_cnt() local
3017 return dd->send_pio_err_status_cnt[10]; in access_pio_sm_pkt_reset_parity_err_cnt()
3024 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pkt_evict_fifo_parity_err_cnt() local
3026 return dd->send_pio_err_status_cnt[9]; in access_pio_pkt_evict_fifo_parity_err_cnt()
3033 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_sbrdctrl_crrel_fifo_parity_err_cnt() local
3035 return dd->send_pio_err_status_cnt[8]; in access_pio_sbrdctrl_crrel_fifo_parity_err_cnt()
3042 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_sbrdctl_crrel_parity_err_cnt() local
3044 return dd->send_pio_err_status_cnt[7]; in access_pio_sbrdctl_crrel_parity_err_cnt()
3051 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pec_fifo_parity_err_cnt() local
3053 return dd->send_pio_err_status_cnt[6]; in access_pio_pec_fifo_parity_err_cnt()
3060 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pcc_fifo_parity_err_cnt() local
3062 return dd->send_pio_err_status_cnt[5]; in access_pio_pcc_fifo_parity_err_cnt()
3069 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_sb_mem_fifo1_err_cnt() local
3071 return dd->send_pio_err_status_cnt[4]; in access_pio_sb_mem_fifo1_err_cnt()
3078 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_sb_mem_fifo0_err_cnt() local
3080 return dd->send_pio_err_status_cnt[3]; in access_pio_sb_mem_fifo0_err_cnt()
3087 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_csr_parity_err_cnt() local
3089 return dd->send_pio_err_status_cnt[2]; in access_pio_csr_parity_err_cnt()
3096 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_addr_parity_err_cnt() local
3098 return dd->send_pio_err_status_cnt[1]; in access_pio_write_addr_parity_err_cnt()
3105 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_bad_ctxt_err_cnt() local
3107 return dd->send_pio_err_status_cnt[0]; in access_pio_write_bad_ctxt_err_cnt()
3118 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_pcie_req_tracking_cor_err_cnt() local
3120 return dd->send_dma_err_status_cnt[3]; in access_sdma_pcie_req_tracking_cor_err_cnt()
3127 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_pcie_req_tracking_unc_err_cnt() local
3129 return dd->send_dma_err_status_cnt[2]; in access_sdma_pcie_req_tracking_unc_err_cnt()
3136 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_csr_parity_err_cnt() local
3138 return dd->send_dma_err_status_cnt[1]; in access_sdma_csr_parity_err_cnt()
3145 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_rpy_tag_err_cnt() local
3147 return dd->send_dma_err_status_cnt[0]; in access_sdma_rpy_tag_err_cnt()
3158 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_pio_memory_csr_unc_err_cnt() local
3160 return dd->send_egress_err_status_cnt[63]; in access_tx_read_pio_memory_csr_unc_err_cnt()
3167 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_sdma_memory_csr_err_cnt() local
3169 return dd->send_egress_err_status_cnt[62]; in access_tx_read_sdma_memory_csr_err_cnt()
3176 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_egress_fifo_cor_err_cnt() local
3178 return dd->send_egress_err_status_cnt[61]; in access_tx_egress_fifo_cor_err_cnt()
3185 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_pio_memory_cor_err_cnt() local
3187 return dd->send_egress_err_status_cnt[60]; in access_tx_read_pio_memory_cor_err_cnt()
3194 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_sdma_memory_cor_err_cnt() local
3196 return dd->send_egress_err_status_cnt[59]; in access_tx_read_sdma_memory_cor_err_cnt()
3203 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sb_hdr_cor_err_cnt() local
3205 return dd->send_egress_err_status_cnt[58]; in access_tx_sb_hdr_cor_err_cnt()
3212 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_credit_overrun_err_cnt() local
3214 return dd->send_egress_err_status_cnt[57]; in access_tx_credit_overrun_err_cnt()
3221 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo8_cor_err_cnt() local
3223 return dd->send_egress_err_status_cnt[56]; in access_tx_launch_fifo8_cor_err_cnt()
3230 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo7_cor_err_cnt() local
3232 return dd->send_egress_err_status_cnt[55]; in access_tx_launch_fifo7_cor_err_cnt()
3239 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo6_cor_err_cnt() local
3241 return dd->send_egress_err_status_cnt[54]; in access_tx_launch_fifo6_cor_err_cnt()
3248 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo5_cor_err_cnt() local
3250 return dd->send_egress_err_status_cnt[53]; in access_tx_launch_fifo5_cor_err_cnt()
3257 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo4_cor_err_cnt() local
3259 return dd->send_egress_err_status_cnt[52]; in access_tx_launch_fifo4_cor_err_cnt()
3266 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo3_cor_err_cnt() local
3268 return dd->send_egress_err_status_cnt[51]; in access_tx_launch_fifo3_cor_err_cnt()
3275 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo2_cor_err_cnt() local
3277 return dd->send_egress_err_status_cnt[50]; in access_tx_launch_fifo2_cor_err_cnt()
3284 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo1_cor_err_cnt() local
3286 return dd->send_egress_err_status_cnt[49]; in access_tx_launch_fifo1_cor_err_cnt()
3293 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo0_cor_err_cnt() local
3295 return dd->send_egress_err_status_cnt[48]; in access_tx_launch_fifo0_cor_err_cnt()
3302 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_credit_return_vl_err_cnt() local
3304 return dd->send_egress_err_status_cnt[47]; in access_tx_credit_return_vl_err_cnt()
3311 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_hcrc_insertion_err_cnt() local
3313 return dd->send_egress_err_status_cnt[46]; in access_tx_hcrc_insertion_err_cnt()
3320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_egress_fifo_unc_err_cnt() local
3322 return dd->send_egress_err_status_cnt[45]; in access_tx_egress_fifo_unc_err_cnt()
3329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_pio_memory_unc_err_cnt() local
3331 return dd->send_egress_err_status_cnt[44]; in access_tx_read_pio_memory_unc_err_cnt()
3338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_sdma_memory_unc_err_cnt() local
3340 return dd->send_egress_err_status_cnt[43]; in access_tx_read_sdma_memory_unc_err_cnt()
3347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sb_hdr_unc_err_cnt() local
3349 return dd->send_egress_err_status_cnt[42]; in access_tx_sb_hdr_unc_err_cnt()
3356 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_credit_return_partiy_err_cnt() local
3358 return dd->send_egress_err_status_cnt[41]; in access_tx_credit_return_partiy_err_cnt()
3365 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo8_unc_or_parity_err_cnt() local
3367 return dd->send_egress_err_status_cnt[40]; in access_tx_launch_fifo8_unc_or_parity_err_cnt()
3374 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo7_unc_or_parity_err_cnt() local
3376 return dd->send_egress_err_status_cnt[39]; in access_tx_launch_fifo7_unc_or_parity_err_cnt()
3383 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo6_unc_or_parity_err_cnt() local
3385 return dd->send_egress_err_status_cnt[38]; in access_tx_launch_fifo6_unc_or_parity_err_cnt()
3392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo5_unc_or_parity_err_cnt() local
3394 return dd->send_egress_err_status_cnt[37]; in access_tx_launch_fifo5_unc_or_parity_err_cnt()
3401 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo4_unc_or_parity_err_cnt() local
3403 return dd->send_egress_err_status_cnt[36]; in access_tx_launch_fifo4_unc_or_parity_err_cnt()
3410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo3_unc_or_parity_err_cnt() local
3412 return dd->send_egress_err_status_cnt[35]; in access_tx_launch_fifo3_unc_or_parity_err_cnt()
3419 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo2_unc_or_parity_err_cnt() local
3421 return dd->send_egress_err_status_cnt[34]; in access_tx_launch_fifo2_unc_or_parity_err_cnt()
3428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo1_unc_or_parity_err_cnt() local
3430 return dd->send_egress_err_status_cnt[33]; in access_tx_launch_fifo1_unc_or_parity_err_cnt()
3437 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo0_unc_or_parity_err_cnt() local
3439 return dd->send_egress_err_status_cnt[32]; in access_tx_launch_fifo0_unc_or_parity_err_cnt()
3446 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma15_disallowed_packet_err_cnt() local
3448 return dd->send_egress_err_status_cnt[31]; in access_tx_sdma15_disallowed_packet_err_cnt()
3455 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma14_disallowed_packet_err_cnt() local
3457 return dd->send_egress_err_status_cnt[30]; in access_tx_sdma14_disallowed_packet_err_cnt()
3464 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma13_disallowed_packet_err_cnt() local
3466 return dd->send_egress_err_status_cnt[29]; in access_tx_sdma13_disallowed_packet_err_cnt()
3473 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma12_disallowed_packet_err_cnt() local
3475 return dd->send_egress_err_status_cnt[28]; in access_tx_sdma12_disallowed_packet_err_cnt()
3482 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma11_disallowed_packet_err_cnt() local
3484 return dd->send_egress_err_status_cnt[27]; in access_tx_sdma11_disallowed_packet_err_cnt()
3491 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma10_disallowed_packet_err_cnt() local
3493 return dd->send_egress_err_status_cnt[26]; in access_tx_sdma10_disallowed_packet_err_cnt()
3500 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma9_disallowed_packet_err_cnt() local
3502 return dd->send_egress_err_status_cnt[25]; in access_tx_sdma9_disallowed_packet_err_cnt()
3509 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma8_disallowed_packet_err_cnt() local
3511 return dd->send_egress_err_status_cnt[24]; in access_tx_sdma8_disallowed_packet_err_cnt()
3518 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma7_disallowed_packet_err_cnt() local
3520 return dd->send_egress_err_status_cnt[23]; in access_tx_sdma7_disallowed_packet_err_cnt()
3527 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma6_disallowed_packet_err_cnt() local
3529 return dd->send_egress_err_status_cnt[22]; in access_tx_sdma6_disallowed_packet_err_cnt()
3536 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma5_disallowed_packet_err_cnt() local
3538 return dd->send_egress_err_status_cnt[21]; in access_tx_sdma5_disallowed_packet_err_cnt()
3545 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma4_disallowed_packet_err_cnt() local
3547 return dd->send_egress_err_status_cnt[20]; in access_tx_sdma4_disallowed_packet_err_cnt()
3554 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma3_disallowed_packet_err_cnt() local
3556 return dd->send_egress_err_status_cnt[19]; in access_tx_sdma3_disallowed_packet_err_cnt()
3563 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma2_disallowed_packet_err_cnt() local
3565 return dd->send_egress_err_status_cnt[18]; in access_tx_sdma2_disallowed_packet_err_cnt()
3572 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma1_disallowed_packet_err_cnt() local
3574 return dd->send_egress_err_status_cnt[17]; in access_tx_sdma1_disallowed_packet_err_cnt()
3581 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma0_disallowed_packet_err_cnt() local
3583 return dd->send_egress_err_status_cnt[16]; in access_tx_sdma0_disallowed_packet_err_cnt()
3590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_config_parity_err_cnt() local
3592 return dd->send_egress_err_status_cnt[15]; in access_tx_config_parity_err_cnt()
3599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sbrd_ctl_csr_parity_err_cnt() local
3601 return dd->send_egress_err_status_cnt[14]; in access_tx_sbrd_ctl_csr_parity_err_cnt()
3608 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_csr_parity_err_cnt() local
3610 return dd->send_egress_err_status_cnt[13]; in access_tx_launch_csr_parity_err_cnt()
3617 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_illegal_vl_err_cnt() local
3619 return dd->send_egress_err_status_cnt[12]; in access_tx_illegal_vl_err_cnt()
3626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sbrd_ctl_state_machine_parity_err_cnt() local
3628 return dd->send_egress_err_status_cnt[11]; in access_tx_sbrd_ctl_state_machine_parity_err_cnt()
3635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_egress_reserved_10_err_cnt() local
3637 return dd->send_egress_err_status_cnt[10]; in access_egress_reserved_10_err_cnt()
3644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_egress_reserved_9_err_cnt() local
3646 return dd->send_egress_err_status_cnt[9]; in access_egress_reserved_9_err_cnt()
3653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma_launch_intf_parity_err_cnt() local
3655 return dd->send_egress_err_status_cnt[8]; in access_tx_sdma_launch_intf_parity_err_cnt()
3662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_pio_launch_intf_parity_err_cnt() local
3664 return dd->send_egress_err_status_cnt[7]; in access_tx_pio_launch_intf_parity_err_cnt()
3671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_egress_reserved_6_err_cnt() local
3673 return dd->send_egress_err_status_cnt[6]; in access_egress_reserved_6_err_cnt()
3680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_incorrect_link_state_err_cnt() local
3682 return dd->send_egress_err_status_cnt[5]; in access_tx_incorrect_link_state_err_cnt()
3689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_linkdown_err_cnt() local
3691 return dd->send_egress_err_status_cnt[4]; in access_tx_linkdown_err_cnt()
3698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_egress_fifi_underrun_or_parity_err_cnt() local
3700 return dd->send_egress_err_status_cnt[3]; in access_tx_egress_fifi_underrun_or_parity_err_cnt()
3707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_egress_reserved_2_err_cnt() local
3709 return dd->send_egress_err_status_cnt[2]; in access_egress_reserved_2_err_cnt()
3716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_pkt_integrity_mem_unc_err_cnt() local
3718 return dd->send_egress_err_status_cnt[1]; in access_tx_pkt_integrity_mem_unc_err_cnt()
3725 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_pkt_integrity_mem_cor_err_cnt() local
3727 return dd->send_egress_err_status_cnt[0]; in access_tx_pkt_integrity_mem_cor_err_cnt()
3738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_send_csr_write_bad_addr_err_cnt() local
3740 return dd->send_err_status_cnt[2]; in access_send_csr_write_bad_addr_err_cnt()
3747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_send_csr_read_bad_addr_err_cnt() local
3749 return dd->send_err_status_cnt[1]; in access_send_csr_read_bad_addr_err_cnt()
3756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_send_csr_parity_cnt() local
3758 return dd->send_err_status_cnt[0]; in access_send_csr_parity_cnt()
3769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_out_of_bounds_err_cnt() local
3771 return dd->sw_ctxt_err_status_cnt[4]; in access_pio_write_out_of_bounds_err_cnt()
3778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_overflow_err_cnt() local
3780 return dd->sw_ctxt_err_status_cnt[3]; in access_pio_write_overflow_err_cnt()
3787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_crosses_boundary_err_cnt() local
3789 return dd->sw_ctxt_err_status_cnt[2]; in access_pio_write_crosses_boundary_err_cnt()
3796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_disallowed_packet_err_cnt() local
3798 return dd->sw_ctxt_err_status_cnt[1]; in access_pio_disallowed_packet_err_cnt()
3805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_inconsistent_sop_err_cnt() local
3807 return dd->sw_ctxt_err_status_cnt[0]; in access_pio_inconsistent_sop_err_cnt()
3818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_request_fifo_cor_err_cnt() local
3820 return dd->sw_send_dma_eng_err_status_cnt[23]; in access_sdma_header_request_fifo_cor_err_cnt()
3827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_storage_cor_err_cnt() local
3829 return dd->sw_send_dma_eng_err_status_cnt[22]; in access_sdma_header_storage_cor_err_cnt()
3836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_packet_tracking_cor_err_cnt() local
3838 return dd->sw_send_dma_eng_err_status_cnt[21]; in access_sdma_packet_tracking_cor_err_cnt()
3845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_assembly_cor_err_cnt() local
3847 return dd->sw_send_dma_eng_err_status_cnt[20]; in access_sdma_assembly_cor_err_cnt()
3854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_desc_table_cor_err_cnt() local
3856 return dd->sw_send_dma_eng_err_status_cnt[19]; in access_sdma_desc_table_cor_err_cnt()
3863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_request_fifo_unc_err_cnt() local
3865 return dd->sw_send_dma_eng_err_status_cnt[18]; in access_sdma_header_request_fifo_unc_err_cnt()
3872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_storage_unc_err_cnt() local
3874 return dd->sw_send_dma_eng_err_status_cnt[17]; in access_sdma_header_storage_unc_err_cnt()
3881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_packet_tracking_unc_err_cnt() local
3883 return dd->sw_send_dma_eng_err_status_cnt[16]; in access_sdma_packet_tracking_unc_err_cnt()
3890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_assembly_unc_err_cnt() local
3892 return dd->sw_send_dma_eng_err_status_cnt[15]; in access_sdma_assembly_unc_err_cnt()
3899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_desc_table_unc_err_cnt() local
3901 return dd->sw_send_dma_eng_err_status_cnt[14]; in access_sdma_desc_table_unc_err_cnt()
3908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_timeout_err_cnt() local
3910 return dd->sw_send_dma_eng_err_status_cnt[13]; in access_sdma_timeout_err_cnt()
3917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_length_err_cnt() local
3919 return dd->sw_send_dma_eng_err_status_cnt[12]; in access_sdma_header_length_err_cnt()
3926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_address_err_cnt() local
3928 return dd->sw_send_dma_eng_err_status_cnt[11]; in access_sdma_header_address_err_cnt()
3935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_select_err_cnt() local
3937 return dd->sw_send_dma_eng_err_status_cnt[10]; in access_sdma_header_select_err_cnt()
3944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_reserved_9_err_cnt() local
3946 return dd->sw_send_dma_eng_err_status_cnt[9]; in access_sdma_reserved_9_err_cnt()
3953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_packet_desc_overflow_err_cnt() local
3955 return dd->sw_send_dma_eng_err_status_cnt[8]; in access_sdma_packet_desc_overflow_err_cnt()
3962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_length_mismatch_err_cnt() local
3964 return dd->sw_send_dma_eng_err_status_cnt[7]; in access_sdma_length_mismatch_err_cnt()
3970 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_halt_err_cnt() local
3972 return dd->sw_send_dma_eng_err_status_cnt[6]; in access_sdma_halt_err_cnt()
3979 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_mem_read_err_cnt() local
3981 return dd->sw_send_dma_eng_err_status_cnt[5]; in access_sdma_mem_read_err_cnt()
3988 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_first_desc_err_cnt() local
3990 return dd->sw_send_dma_eng_err_status_cnt[4]; in access_sdma_first_desc_err_cnt()
3997 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_tail_out_of_bounds_err_cnt() local
3999 return dd->sw_send_dma_eng_err_status_cnt[3]; in access_sdma_tail_out_of_bounds_err_cnt()
4006 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_too_long_err_cnt() local
4008 return dd->sw_send_dma_eng_err_status_cnt[2]; in access_sdma_too_long_err_cnt()
4015 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_gen_mismatch_err_cnt() local
4017 return dd->sw_send_dma_eng_err_status_cnt[1]; in access_sdma_gen_mismatch_err_cnt()
4024 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_wrong_dw_err_cnt() local
4026 return dd->sw_send_dma_eng_err_status_cnt[0]; in access_sdma_wrong_dw_err_cnt()
4033 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_dc_rcv_err_cnt() local
4038 val = read_write_csr(dd, csr, mode, data); in access_dc_rcv_err_cnt()
4040 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ? in access_dc_rcv_err_cnt()
4041 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors; in access_dc_rcv_err_cnt()
4043 dd->sw_rcv_bypass_packet_errors = 0; in access_dc_rcv_err_cnt()
4045 dd_dev_err(dd, "Invalid cntr register access mode"); in access_dc_rcv_err_cnt()
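The access_dc_rcv_err_cnt() lines above fold a software-kept bypass-packet error tally into the hardware DCC receive error count, clamping at CNTR_MAX rather than wrapping. A minimal standalone sketch of that saturating fold; the CNTR_MAX value here is assumed for illustration:

    #include <stdint.h>

    #define CNTR_MAX 0xFFFFFFFFFFFFFFull  /* assumed ceiling for the sketch */

    /* Add a software error tally to a hardware counter value, saturating
     * at CNTR_MAX instead of overflowing; the tally is consumed once folded. */
    static uint64_t fold_sw_errors(uint64_t hw_val, uint64_t *sw_errs)
    {
        uint64_t val = (hw_val > CNTR_MAX - *sw_errs) ?
                       CNTR_MAX : hw_val + *sw_errs;
        *sw_errs = 0;
        return val;
    }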
4056 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
4074 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
5204 int is_ax(struct hfi1_devdata *dd) in is_ax() argument
5207 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT in is_ax()
5213 int is_bx(struct hfi1_devdata *dd) in is_bx() argument
5216 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT in is_bx()
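is_ax() and is_bx() above classify the silicon stepping by extracting the minor chip revision from dd->revision. A sketch of that decode under an assumed field layout (the real shift and mask come from the CCE_REVISION_CHIP_REV_MINOR_* definitions, and the stepping values are assumptions here):

    #include <stdint.h>

    /* Assumed layout for the sketch: minor chip revision in bits [15:8]. */
    #define CHIP_REV_MINOR_SHIFT 8
    #define CHIP_REV_MINOR_MASK  0xffull

    static unsigned int rev_minor(uint64_t revision)
    {
        return (revision >> CHIP_REV_MINOR_SHIFT) & CHIP_REV_MINOR_MASK;
    }

    /* Assumption: A-step parts report minor revision 0, B-step parts 1. */
    static int chip_is_ax(uint64_t revision) { return rev_minor(revision) == 0; }
    static int chip_is_bx(uint64_t revision) { return rev_minor(revision) == 1; }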
5228 mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64))); in is_urg_masked()
5516 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_cce_err() argument
5525 dd_dev_info(dd, "CCE Error: %s\n", in handle_cce_err()
5529 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) { in handle_cce_err()
5532 start_freeze_handling(dd->pport, FREEZE_SELF); in handle_cce_err()
5537 incr_cntr64(&dd->cce_err_status_cnt[i]); in handle_cce_err()
5539 incr_cntr64(&dd->sw_cce_err_status_aggregate); in handle_cce_err()
5551 struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer); in update_rcverr_timer() local
5552 struct hfi1_pportdata *ppd = dd->pport; in update_rcverr_timer()
5553 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL); in update_rcverr_timer()
5555 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt && in update_rcverr_timer()
5557 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__); in update_rcverr_timer()
5563 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; in update_rcverr_timer()
5565 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); in update_rcverr_timer()
5568 static int init_rcverr(struct hfi1_devdata *dd) in init_rcverr() argument
5570 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0); in init_rcverr()
5572 dd->rcv_ovfl_cnt = 0; in init_rcverr()
5573 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); in init_rcverr()
5576 static void free_rcverr(struct hfi1_devdata *dd) in free_rcverr() argument
5578 if (dd->rcverr_timer.function) in free_rcverr()
5579 del_timer_sync(&dd->rcverr_timer); in free_rcverr()
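init_rcverr(), update_rcverr_timer(), and free_rcverr() above form a self-rearming watchdog: the callback samples the receive-overflow counter, reacts to any increase, and re-queues itself. A condensed kernel-style sketch of the pattern; struct mydev, the two helpers, and the poll interval are illustrative stand-ins, not driver API:

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    #define CHECK_SECS 2  /* assumed poll interval */

    struct mydev {
        struct timer_list watchdog;
        u32 last_ovfl;
    };

    static u32 read_overflow_counter(struct mydev *dev);  /* stand-in */
    static void handle_overflow(struct mydev *dev);       /* e.g. bounce link */

    static void watchdog_fn(struct timer_list *t)
    {
        struct mydev *dev = from_timer(dev, t, watchdog);
        u32 cur = read_overflow_counter(dev);

        if (dev->last_ovfl < cur)
            handle_overflow(dev);
        dev->last_ovfl = cur;
        mod_timer(&dev->watchdog, jiffies + HZ * CHECK_SECS);  /* re-arm */
    }

    static int watchdog_init(struct mydev *dev)
    {
        timer_setup(&dev->watchdog, watchdog_fn, 0);
        dev->last_ovfl = 0;
        return mod_timer(&dev->watchdog, jiffies + HZ * CHECK_SECS);
    }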
5582 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_rxe_err() argument
5587 dd_dev_info(dd, "Receive Error: %s\n", in handle_rxe_err()
5597 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK)) in handle_rxe_err()
5600 start_freeze_handling(dd->pport, flags); in handle_rxe_err()
5605 incr_cntr64(&dd->rcv_err_status_cnt[i]); in handle_rxe_err()
5609 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_misc_err() argument
5614 dd_dev_info(dd, "Misc Error: %s", in handle_misc_err()
5618 incr_cntr64(&dd->misc_err_status_cnt[i]); in handle_misc_err()
5622 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_pio_err() argument
5627 dd_dev_info(dd, "PIO Error: %s\n", in handle_pio_err()
5631 start_freeze_handling(dd->pport, 0); in handle_pio_err()
5635 incr_cntr64(&dd->send_pio_err_status_cnt[i]); in handle_pio_err()
5639 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_sdma_err() argument
5644 dd_dev_info(dd, "SDMA Error: %s\n", in handle_sdma_err()
5648 start_freeze_handling(dd->pport, 0); in handle_sdma_err()
5652 incr_cntr64(&dd->send_dma_err_status_cnt[i]); in handle_sdma_err()
5661 static void count_port_inactive(struct hfi1_devdata *dd) in count_port_inactive() argument
5663 __count_port_discards(dd->pport); in count_port_inactive()
5675 static void handle_send_egress_err_info(struct hfi1_devdata *dd, in handle_send_egress_err_info() argument
5678 struct hfi1_pportdata *ppd = dd->pport; in handle_send_egress_err_info()
5679 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */ in handle_send_egress_err_info()
5680 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO); in handle_send_egress_err_info()
5684 write_csr(dd, SEND_EGRESS_ERR_INFO, info); in handle_send_egress_err_info()
5686 dd_dev_info(dd, in handle_send_egress_err_info()
5762 static int engine_to_vl(struct hfi1_devdata *dd, int engine) in engine_to_vl() argument
5772 m = rcu_dereference(dd->sdma_map); in engine_to_vl()
5783 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index) in sc_to_vl() argument
5789 sci = &dd->send_contexts[sw_index]; in sc_to_vl()
5798 if (dd->vld[15].sc == sc) in sc_to_vl()
5801 if (dd->vld[i].sc == sc) in sc_to_vl()
5807 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_egress_err() argument
5814 start_freeze_handling(dd->pport, 0); in handle_egress_err()
5815 else if (is_ax(dd) && in handle_egress_err()
5817 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) in handle_egress_err()
5818 start_freeze_handling(dd->pport, 0); in handle_egress_err()
5827 count_port_inactive(dd); in handle_egress_err()
5830 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift)); in handle_egress_err()
5832 handle_send_egress_err_info(dd, vl); in handle_egress_err()
5841 dd_dev_info(dd, "Egress Error: %s\n", in handle_egress_err()
5846 incr_cntr64(&dd->send_egress_err_status_cnt[i]); in handle_egress_err()
5850 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_txe_err() argument
5855 dd_dev_info(dd, "Send Error: %s\n", in handle_txe_err()
5860 incr_cntr64(&dd->send_err_status_cnt[i]); in handle_txe_err()
5881 static void interrupt_clear_down(struct hfi1_devdata *dd, in interrupt_clear_down() argument
5891 reg = read_kctxt_csr(dd, context, eri->status); in interrupt_clear_down()
5894 write_kctxt_csr(dd, context, eri->clear, reg); in interrupt_clear_down()
5896 eri->handler(dd, context, reg); in interrupt_clear_down()
5901 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n", in interrupt_clear_down()
5907 mask = read_kctxt_csr(dd, context, eri->mask); in interrupt_clear_down()
5909 write_kctxt_csr(dd, context, eri->mask, mask); in interrupt_clear_down()
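interrupt_clear_down() above implements a clear-down loop: read the status CSR, ack exactly the bits seen, dispatch the handler, and once bits keep re-asserting past a retry budget ("Repeating ... masking"), mask them so they stop firing. A sketch of the loop; rd()/wr(), the budget, the mask polarity, and the err_reg_info layout are assumptions:

    #include <linux/types.h>

    struct mydev;                                   /* opaque stand-in */
    static u64 rd(struct mydev *dev, u32 off);      /* stand-in CSR read */
    static void wr(struct mydev *dev, u32 off, u64 val);

    struct err_reg_info {
        u32 status, clear, mask;                    /* CSR offsets */
        void (*handler)(struct mydev *dev, u64 reg);
    };

    #define MAX_CLEAR_COUNT 20  /* assumed retry budget */

    static void clear_down(struct mydev *dev, const struct err_reg_info *eri)
    {
        u64 reg;
        int count = 0;

        while ((reg = rd(dev, eri->status)) != 0) {
            wr(dev, eri->clear, reg);       /* ack only what we saw */
            if (eri->handler)
                eri->handler(dev, reg);
            if (++count > MAX_CLEAR_COUNT) {
                u64 mask = rd(dev, eri->mask);

                wr(dev, eri->mask, mask & ~reg);  /* silence stuck bits */
                break;
            }
        }
    }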
5918 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source) in is_misc_err_int() argument
5923 interrupt_clear_down(dd, 0, eri); in is_misc_err_int()
5925 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n", in is_misc_err_int()
5946 static void is_sendctxt_err_int(struct hfi1_devdata *dd, in is_sendctxt_err_int() argument
5957 sw_index = dd->hw_to_sw[hw_context]; in is_sendctxt_err_int()
5958 if (sw_index >= dd->num_send_contexts) { in is_sendctxt_err_int()
5959 dd_dev_err(dd, in is_sendctxt_err_int()
5964 sci = &dd->send_contexts[sw_index]; in is_sendctxt_err_int()
5965 spin_lock_irqsave(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
5968 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, in is_sendctxt_err_int()
5970 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
5977 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS); in is_sendctxt_err_int()
5979 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context, in is_sendctxt_err_int()
5984 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index)); in is_sendctxt_err_int()
5991 queue_work(dd->pport->hfi1_wq, &sc->halt_work); in is_sendctxt_err_int()
5992 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
6001 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]); in is_sendctxt_err_int()
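is_sendctxt_err_int() above translates a hardware context number to its software slot through dd->hw_to_sw[] and validates the result before use. The lookup-and-validate step in isolation; types, table size, and names are illustrative:

    #include <linux/types.h>

    struct send_ctx;                         /* opaque */

    struct mydev {
        u8 hw_to_sw[160];                    /* assumed table size */
        unsigned int num_send_contexts;
        struct { struct send_ctx *sc; } *send_contexts;
    };

    /* Map a hardware send context to its software descriptor, rejecting
     * stale or unassigned mappings before any dereference. */
    static struct send_ctx *hw_to_sc(struct mydev *dev, unsigned int hw_context)
    {
        unsigned int sw_index = dev->hw_to_sw[hw_context];

        if (sw_index >= dev->num_send_contexts)
            return NULL;
        return dev->send_contexts[sw_index].sc;
    }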
6005 static void handle_sdma_eng_err(struct hfi1_devdata *dd, in handle_sdma_eng_err() argument
6011 sde = &dd->per_sdma[source]; in handle_sdma_eng_err()
6013 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in handle_sdma_eng_err()
6015 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n", in handle_sdma_eng_err()
6028 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]); in handle_sdma_eng_err()
6035 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source) in is_sdma_eng_err_int() argument
6038 struct sdma_engine *sde = &dd->per_sdma[source]; in is_sdma_eng_err_int()
6040 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in is_sdma_eng_err_int()
6042 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx, in is_sdma_eng_err_int()
6046 interrupt_clear_down(dd, source, &sdma_eng_err); in is_sdma_eng_err_int()
6052 static void is_various_int(struct hfi1_devdata *dd, unsigned int source) in is_various_int() argument
6062 handle_temp_err(dd); in is_various_int()
6064 interrupt_clear_down(dd, 0, eri); in is_various_int()
6066 dd_dev_info(dd, in is_various_int()
6071 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) in handle_qsfp_int() argument
6074 struct hfi1_pportdata *ppd = dd->pport; in handle_qsfp_int()
6080 dd_dev_info(dd, "%s: QSFP module removed\n", in handle_qsfp_int()
6100 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : in handle_qsfp_int()
6122 dd_dev_info(dd, "%s: QSFP module inserted\n", in handle_qsfp_int()
6136 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : in handle_qsfp_int()
6145 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n", in handle_qsfp_int()
6157 static int request_host_lcb_access(struct hfi1_devdata *dd) in request_host_lcb_access() argument
6161 ret = do_8051_command(dd, HCMD_MISC, in request_host_lcb_access()
6164 if (ret != HCMD_SUCCESS && !(dd->flags & HFI1_SHUTDOWN)) { in request_host_lcb_access()
6165 dd_dev_err(dd, "%s: command failed with error %d\n", in request_host_lcb_access()
6171 static int request_8051_lcb_access(struct hfi1_devdata *dd) in request_8051_lcb_access() argument
6175 ret = do_8051_command(dd, HCMD_MISC, in request_8051_lcb_access()
6179 dd_dev_err(dd, "%s: command failed with error %d\n", in request_8051_lcb_access()
6189 static inline void set_host_lcb_access(struct hfi1_devdata *dd) in set_host_lcb_access() argument
6191 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, in set_host_lcb_access()
6200 static inline void set_8051_lcb_access(struct hfi1_devdata *dd) in set_8051_lcb_access() argument
6202 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, in set_8051_lcb_access()
6216 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) in acquire_lcb_access() argument
6218 struct hfi1_pportdata *ppd = dd->pport; in acquire_lcb_access()
6236 dd_dev_info(dd, "%s: link state %s not up\n", in acquire_lcb_access()
6242 if (dd->lcb_access_count == 0) { in acquire_lcb_access()
6243 ret = request_host_lcb_access(dd); in acquire_lcb_access()
6245 if (!(dd->flags & HFI1_SHUTDOWN)) in acquire_lcb_access()
6246 dd_dev_err(dd, in acquire_lcb_access()
6251 set_host_lcb_access(dd); in acquire_lcb_access()
6253 dd->lcb_access_count++; in acquire_lcb_access()
6267 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok) in release_lcb_access() argument
6277 mutex_lock(&dd->pport->hls_lock); in release_lcb_access()
6279 while (!mutex_trylock(&dd->pport->hls_lock)) in release_lcb_access()
6283 if (dd->lcb_access_count == 0) { in release_lcb_access()
6284 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n", in release_lcb_access()
6289 if (dd->lcb_access_count == 1) { in release_lcb_access()
6290 set_8051_lcb_access(dd); in release_lcb_access()
6291 ret = request_8051_lcb_access(dd); in release_lcb_access()
6293 dd_dev_err(dd, in release_lcb_access()
6297 set_host_lcb_access(dd); in release_lcb_access()
6301 dd->lcb_access_count--; in release_lcb_access()
6303 mutex_unlock(&dd->pport->hls_lock); in release_lcb_access()
6316 static void init_lcb_access(struct hfi1_devdata *dd) in init_lcb_access() argument
6318 dd->lcb_access_count = 0; in init_lcb_access()
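acquire_lcb_access(), release_lcb_access(), and init_lcb_access() above maintain a refcount so that only the 0->1 acquire asks the 8051 for host ownership of the LCB and only the 1->0 release hands it back. A sketch of that handoff; the lock name and request/select helpers are stand-ins, and the real code also special-cases shutdown and sleep-versus-spin locking:

    #include <linux/mutex.h>
    #include <linux/errno.h>

    struct mydev {
        struct mutex state_lock;
        int lcb_count;
    };

    static int request_host_access(struct mydev *dev);  /* firmware cmd */
    static int request_8051_access(struct mydev *dev);
    static void select_host_access(struct mydev *dev);  /* flip CSR mux */
    static void select_8051_access(struct mydev *dev);

    static int lcb_acquire(struct mydev *dev)
    {
        int ret = 0;

        mutex_lock(&dev->state_lock);
        if (dev->lcb_count == 0) {
            ret = request_host_access(dev);
            if (ret)
                goto done;
            select_host_access(dev);
        }
        dev->lcb_count++;
    done:
        mutex_unlock(&dev->state_lock);
        return ret;
    }

    static int lcb_release(struct mydev *dev)
    {
        int ret = 0;

        mutex_lock(&dev->state_lock);
        if (dev->lcb_count == 0) {
            ret = -EINVAL;                  /* unbalanced release */
            goto done;
        }
        if (dev->lcb_count == 1) {
            select_8051_access(dev);
            ret = request_8051_access(dev);
            if (ret) {
                select_host_access(dev);    /* keep host access on failure */
                goto done;
            }
        }
        dev->lcb_count--;
    done:
        mutex_unlock(&dev->state_lock);
        return ret;
    }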
6324 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data) in hreq_response() argument
6326 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, in hreq_response()
6338 struct hfi1_devdata *dd = ppd->dd; in handle_8051_request() local
6343 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1); in handle_8051_request()
6348 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0); in handle_8051_request()
6363 dd_dev_info(dd, "8051 request: request 0x%x not supported\n", in handle_8051_request()
6365 hreq_response(dd, HREQ_NOT_SUPPORTED, 0); in handle_8051_request()
6369 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET); in handle_8051_request()
6371 (void)read_csr(dd, DCC_CFG_RESET); in handle_8051_request()
6375 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); in handle_8051_request()
6376 hreq_response(dd, HREQ_SUCCESS, 0); in handle_8051_request()
6380 hreq_response(dd, HREQ_SUCCESS, 0); in handle_8051_request()
6384 hreq_response(dd, HREQ_SUCCESS, data); in handle_8051_request()
6387 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type); in handle_8051_request()
6388 hreq_response(dd, HREQ_NOT_SUPPORTED, 0); in handle_8051_request()
6396 void set_up_vau(struct hfi1_devdata *dd, u8 vau) in set_up_vau() argument
6398 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); in set_up_vau()
6403 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); in set_up_vau()
6411 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) in set_up_vl15() argument
6413 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); in set_up_vl15()
6424 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); in set_up_vl15()
6426 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf in set_up_vl15()
6434 void reset_link_credits(struct hfi1_devdata *dd) in reset_link_credits() argument
6440 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); in reset_link_credits()
6441 write_csr(dd, SEND_CM_CREDIT_VL15, 0); in reset_link_credits()
6442 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); in reset_link_credits()
6444 pio_send_control(dd, PSC_CM_RESET); in reset_link_credits()
6446 dd->vl15buf_cached = 0; in reset_link_credits()
6476 static void lcb_shutdown(struct hfi1_devdata *dd, int abort) in lcb_shutdown() argument
6481 write_csr(dd, DC_LCB_CFG_RUN, 0); in lcb_shutdown()
6483 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, in lcb_shutdown()
6486 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN); in lcb_shutdown()
6487 reg = read_csr(dd, DCC_CFG_RESET); in lcb_shutdown()
6488 write_csr(dd, DCC_CFG_RESET, reg | in lcb_shutdown()
6490 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */ in lcb_shutdown()
6493 write_csr(dd, DCC_CFG_RESET, reg); in lcb_shutdown()
6494 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); in lcb_shutdown()
6508 static void _dc_shutdown(struct hfi1_devdata *dd) in _dc_shutdown() argument
6510 lockdep_assert_held(&dd->dc8051_lock); in _dc_shutdown()
6512 if (dd->dc_shutdown) in _dc_shutdown()
6515 dd->dc_shutdown = 1; in _dc_shutdown()
6517 lcb_shutdown(dd, 1); in _dc_shutdown()
6523 write_csr(dd, DC_DC8051_CFG_RST, 0x1); in _dc_shutdown()
6526 static void dc_shutdown(struct hfi1_devdata *dd) in dc_shutdown() argument
6528 mutex_lock(&dd->dc8051_lock); in dc_shutdown()
6529 _dc_shutdown(dd); in dc_shutdown()
6530 mutex_unlock(&dd->dc8051_lock); in dc_shutdown()
6539 static void _dc_start(struct hfi1_devdata *dd) in _dc_start() argument
6541 lockdep_assert_held(&dd->dc8051_lock); in _dc_start()
6543 if (!dd->dc_shutdown) in _dc_start()
6547 write_csr(dd, DC_DC8051_CFG_RST, 0ull); in _dc_start()
6549 if (wait_fm_ready(dd, TIMEOUT_8051_START)) in _dc_start()
6550 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n", in _dc_start()
6554 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); in _dc_start()
6556 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); in _dc_start()
6557 dd->dc_shutdown = 0; in _dc_start()
6560 static void dc_start(struct hfi1_devdata *dd) in dc_start() argument
6562 mutex_lock(&dd->dc8051_lock); in dc_start()
6563 _dc_start(dd); in dc_start()
6564 mutex_unlock(&dd->dc8051_lock); in dc_start()
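_dc_shutdown()/_dc_start() and their dc_shutdown()/dc_start() wrappers above use the _locked/unlocked idiom: the underscore variant asserts ownership of dc8051_lock with lockdep_assert_held(), so paths that already hold the mutex (such as the 8051 command path) can reuse the body. A generic sketch with stand-in names:

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    struct mydev {
        struct mutex lock;
        int stopped;
    };

    static void _engine_stop(struct mydev *dev)
    {
        lockdep_assert_held(&dev->lock);    /* document the contract */
        if (dev->stopped)
            return;                         /* idempotent */
        dev->stopped = 1;
        /* ... put the engine into reset ... */
    }

    static void engine_stop(struct mydev *dev)
    {
        mutex_lock(&dev->lock);
        _engine_stop(dev);
        mutex_unlock(&dev->lock);
    }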
6570 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd) in adjust_lcb_for_fpga_serdes() argument
6575 if (dd->icode != ICODE_FPGA_EMULATION) in adjust_lcb_for_fpga_serdes()
6585 if (is_emulator_s(dd)) in adjust_lcb_for_fpga_serdes()
6589 version = emulator_rev(dd); in adjust_lcb_for_fpga_serdes()
6590 if (!is_ax(dd)) in adjust_lcb_for_fpga_serdes()
6634 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull); in adjust_lcb_for_fpga_serdes()
6645 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr); in adjust_lcb_for_fpga_serdes()
6647 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, in adjust_lcb_for_fpga_serdes()
6649 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr); in adjust_lcb_for_fpga_serdes()
6661 struct hfi1_devdata *dd = ppd->dd; in handle_sma_message() local
6669 ret = read_idle_sma(dd, &msg); in handle_sma_message()
6672 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg); in handle_sma_message()
6700 dd, in handle_sma_message()
6706 dd_dev_err(dd, in handle_sma_message()
6713 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear) in adjust_rcvctrl() argument
6718 spin_lock_irqsave(&dd->rcvctrl_lock, flags); in adjust_rcvctrl()
6719 rcvctrl = read_csr(dd, RCV_CTRL); in adjust_rcvctrl()
6722 write_csr(dd, RCV_CTRL, rcvctrl); in adjust_rcvctrl()
6723 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags); in adjust_rcvctrl()
6726 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add) in add_rcvctrl() argument
6728 adjust_rcvctrl(dd, add, 0); in add_rcvctrl()
6731 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear) in clear_rcvctrl() argument
6733 adjust_rcvctrl(dd, 0, clear); in clear_rcvctrl()
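adjust_rcvctrl() above serializes a read-modify-write of the shared RCV_CTRL CSR behind rcvctrl_lock, with add_rcvctrl()/clear_rcvctrl() as one-line wrappers. The same shape with stand-in accessors and a placeholder offset:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define RCV_CTRL 0x00  /* stand-in for the real CSR offset */

    struct mydev { spinlock_t ctrl_lock; };

    static u64 rd(struct mydev *dev, u32 off);   /* stand-in CSR read */
    static void wr(struct mydev *dev, u32 off, u64 val);

    static void ctrl_adjust(struct mydev *dev, u64 add, u64 clear)
    {
        unsigned long flags;
        u64 v;

        spin_lock_irqsave(&dev->ctrl_lock, flags);
        v = rd(dev, RCV_CTRL);
        v |= add;
        v &= ~clear;
        wr(dev, RCV_CTRL, v);
        spin_unlock_irqrestore(&dev->ctrl_lock, flags);
    }

    static inline void ctrl_set(struct mydev *dev, u64 bits)
    {
        ctrl_adjust(dev, bits, 0);
    }

    static inline void ctrl_clear(struct mydev *dev, u64 bits)
    {
        ctrl_adjust(dev, 0, bits);
    }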
6741 struct hfi1_devdata *dd = ppd->dd; in start_freeze_handling() local
6747 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); in start_freeze_handling()
6750 dd->flags |= HFI1_FROZEN; in start_freeze_handling()
6753 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); in start_freeze_handling()
6758 for (i = 0; i < dd->num_send_contexts; i++) { in start_freeze_handling()
6759 sc = dd->send_contexts[i].sc; in start_freeze_handling()
6768 dd_dev_err(dd, in start_freeze_handling()
6783 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze) in wait_for_freeze_status() argument
6790 reg = read_csr(dd, CCE_STATUS); in wait_for_freeze_status()
6802 dd_dev_err(dd, in wait_for_freeze_status()
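wait_for_freeze_status() above polls CCE_STATUS until the freeze state matches what the caller asked for, failing loudly on timeout. A poll-with-deadline sketch; the bit name, timeout, and sleep window are assumptions:

    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define CCE_STATUS 0x00               /* stand-in offset */
    #define FROZEN_BIT BIT_ULL(0)         /* stand-in bit */

    struct mydev;
    static u64 rd(struct mydev *dev, u32 off);  /* stand-in CSR read */

    static int wait_frozen(struct mydev *dev, bool want_frozen)
    {
        unsigned long deadline = jiffies + msecs_to_jiffies(100);

        for (;;) {
            bool frozen = rd(dev, CCE_STATUS) & FROZEN_BIT;

            if (frozen == want_frozen)
                return 0;
            if (time_after(jiffies, deadline))
                return -ETIMEDOUT;
            usleep_range(80, 120);        /* back off between reads */
        }
    }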
6815 static void rxe_freeze(struct hfi1_devdata *dd) in rxe_freeze() argument
6821 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in rxe_freeze()
6824 for (i = 0; i < dd->num_rcv_contexts; i++) { in rxe_freeze()
6825 rcd = hfi1_rcd_get_by_index(dd, i); in rxe_freeze()
6826 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd); in rxe_freeze()
6837 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) in rxe_kernel_unfreeze() argument
6844 for (i = 0; i < dd->num_rcv_contexts; i++) { in rxe_kernel_unfreeze()
6845 rcd = hfi1_rcd_get_by_index(dd, i); in rxe_kernel_unfreeze()
6849 (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) { in rxe_kernel_unfreeze()
6857 hfi1_rcvctrl(dd, rcvmask, rcd); in rxe_kernel_unfreeze()
6862 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in rxe_kernel_unfreeze()
6874 struct hfi1_devdata *dd = ppd->dd; in handle_freeze() local
6877 wait_for_freeze_status(dd, 1); in handle_freeze()
6882 pio_freeze(dd); in handle_freeze()
6885 sdma_freeze(dd); in handle_freeze()
6890 rxe_freeze(dd); in handle_freeze()
6896 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK); in handle_freeze()
6897 wait_for_freeze_status(dd, 0); in handle_freeze()
6899 if (is_ax(dd)) { in handle_freeze()
6900 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); in handle_freeze()
6901 wait_for_freeze_status(dd, 1); in handle_freeze()
6902 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK); in handle_freeze()
6903 wait_for_freeze_status(dd, 0); in handle_freeze()
6907 pio_kernel_unfreeze(dd); in handle_freeze()
6910 sdma_unfreeze(dd); in handle_freeze()
6915 rxe_kernel_unfreeze(dd); in handle_freeze()
6930 dd->flags &= ~HFI1_FROZEN; in handle_freeze()
6931 wake_up(&dd->event_queue); in handle_freeze()
6971 struct hfi1_devdata *dd = ppd->dd; in handle_link_up() local
6976 read_ltp_rtt(dd); in handle_link_up()
6981 clear_linkup_counters(dd); in handle_link_up()
6993 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) in handle_link_up()
6994 set_up_vl15(dd, dd->vl15buf_cached); in handle_link_up()
6999 dd_dev_err(dd, in handle_link_up()
7118 read_link_down_reason(ppd->dd, &link_down_reason); in handle_link_down()
7122 dd_dev_info(ppd->dd, "%sUnexpected link down\n", in handle_link_down()
7130 read_planned_down_reason_code(ppd->dd, &neigh_reason); in handle_link_down()
7131 dd_dev_info(ppd->dd, in handle_link_down()
7137 dd_dev_info(ppd->dd, in handle_link_down()
7142 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n", in handle_link_down()
7172 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in handle_link_down()
7179 dc_shutdown(ppd->dd); in handle_link_down()
7196 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n", in handle_link_bounce()
7260 hfi1_event_pkey_change(ppd->dd, ppd->port); in clear_full_mgmt_pkey()
7267 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width) in link_width_to_bits() argument
7275 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup) in link_width_to_bits()
7283 dd_dev_info(dd, "%s: invalid width %d, using 4\n", in link_width_to_bits()
7309 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width, in get_link_widths() argument
7320 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, in get_link_widths()
7322 read_local_lni(dd, &enable_lane_rx); in get_link_widths()
7333 if ((dd->icode == ICODE_RTL_SILICON) && in get_link_widths()
7334 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) { in get_link_widths()
7338 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G; in get_link_widths()
7341 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; in get_link_widths()
7344 dd_dev_err(dd, in get_link_widths()
7347 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; in get_link_widths()
7352 dd_dev_info(dd, in get_link_widths()
7355 *tx_width = link_width_to_bits(dd, tx); in get_link_widths()
7356 *rx_width = link_width_to_bits(dd, rx); in get_link_widths()
7372 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width, in get_linkup_widths() argument
7379 read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths); in get_linkup_widths()
7383 *tx_width = link_width_to_bits(dd, tx); in get_linkup_widths()
7384 *rx_width = link_width_to_bits(dd, rx); in get_linkup_widths()
7387 get_link_widths(dd, &active_tx, &active_rx); in get_linkup_widths()
7403 get_linkup_widths(ppd->dd, &tx_width, &rx_width); in get_linkup_link_widths()
7425 struct hfi1_devdata *dd = ppd->dd; in handle_verify_cap() local
7444 lcb_shutdown(dd, 0); in handle_verify_cap()
7445 adjust_lcb_for_fpga_serdes(dd); in handle_verify_cap()
7447 read_vc_remote_phy(dd, &power_management, &continuous); in handle_verify_cap()
7448 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf, in handle_verify_cap()
7450 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths); in handle_verify_cap()
7451 read_remote_device_id(dd, &device_id, &device_rev); in handle_verify_cap()
7454 get_link_widths(dd, &active_tx, &active_rx); in handle_verify_cap()
7455 dd_dev_info(dd, in handle_verify_cap()
7458 dd_dev_info(dd, in handle_verify_cap()
7462 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n", in handle_verify_cap()
7464 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n", in handle_verify_cap()
7477 set_up_vau(dd, vau); in handle_verify_cap()
7483 set_up_vl15(dd, 0); in handle_verify_cap()
7484 dd->vl15buf_cached = vl15buf; in handle_verify_cap()
7499 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val); in handle_verify_cap()
7500 write_csr(dd, DC_LCB_CFG_CRC_MODE, in handle_verify_cap()
7504 reg = read_csr(dd, SEND_CM_CTRL); in handle_verify_cap()
7506 write_csr(dd, SEND_CM_CTRL, in handle_verify_cap()
7509 write_csr(dd, SEND_CM_CTRL, in handle_verify_cap()
7514 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { in handle_verify_cap()
7534 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n", in handle_verify_cap()
7555 assign_remote_cm_au_table(dd, vcu); in handle_verify_cap()
7566 if (is_ax(dd)) { /* fixed in B0 */ in handle_verify_cap()
7567 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN); in handle_verify_cap()
7570 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg); in handle_verify_cap()
7574 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); in handle_verify_cap()
7577 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ in handle_verify_cap()
7578 set_8051_lcb_access(dd); in handle_verify_cap()
7621 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7631 get_link_widths(ppd->dd, &tx, &rx); in apply_link_downgrade_policy()
7639 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n"); in apply_link_downgrade_policy()
7649 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7651 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7662 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7664 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7695 dd_dev_info(ppd->dd, "8051: Link width downgrade\n"); in handle_link_downgrade()
7730 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_8051_interrupt() argument
7732 struct hfi1_pportdata *ppd = dd->pport; in handle_8051_interrupt()
7741 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051); in handle_8051_interrupt()
7761 dd_dev_info(dd, "Link error: %s\n", in handle_8051_interrupt()
7776 dd_dev_err(dd, "8051 info error: %s\n", in handle_8051_interrupt()
7801 dd_dev_info(dd, "8051: Link up\n"); in handle_8051_interrupt()
7820 dd_dev_info(dd, "8051: Link down%s\n", extra); in handle_8051_interrupt()
7830 dd_dev_info(dd, "8051 info host message: %s\n", in handle_8051_interrupt()
7844 dd_dev_err(dd, "Lost 8051 heartbeat\n"); in handle_8051_interrupt()
7845 write_csr(dd, DC_DC8051_ERR_EN, in handle_8051_interrupt()
7846 read_csr(dd, DC_DC8051_ERR_EN) & in handle_8051_interrupt()
7853 dd_dev_err(dd, "8051 error: %s\n", in handle_8051_interrupt()
7866 dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n", in handle_8051_interrupt()
7871 dd_dev_info(dd, in handle_8051_interrupt()
7925 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_dcc_err() argument
7930 struct hfi1_pportdata *ppd = dd->pport; in handle_dcc_err()
7935 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) { in handle_dcc_err()
7936 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE); in handle_dcc_err()
7937 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK; in handle_dcc_err()
7939 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK; in handle_dcc_err()
7945 struct hfi1_pportdata *ppd = dd->pport; in handle_dcc_err()
7955 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG); in handle_dcc_err()
7956 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) { in handle_dcc_err()
7957 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK; in handle_dcc_err()
7959 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK; in handle_dcc_err()
7998 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n", in handle_dcc_err()
8006 info = read_csr(dd, DCC_ERR_INFO_PORTRCV); in handle_dcc_err()
8007 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0); in handle_dcc_err()
8008 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1); in handle_dcc_err()
8009 if (!(dd->err_info_rcvport.status_and_code & in handle_dcc_err()
8011 dd->err_info_rcvport.status_and_code = in handle_dcc_err()
8014 dd->err_info_rcvport.status_and_code |= in handle_dcc_err()
8020 dd->err_info_rcvport.packet_flit1 = hdr0; in handle_dcc_err()
8021 dd->err_info_rcvport.packet_flit2 = hdr1; in handle_dcc_err()
8050 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n" in handle_dcc_err()
8059 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n"); in handle_dcc_err()
8064 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n"); in handle_dcc_err()
8068 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev))) in handle_dcc_err()
8073 dd_dev_info_ratelimited(dd, "DCC Error: %s\n", in handle_dcc_err()
8080 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n", in handle_dcc_err()
8087 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_lcb_err() argument
8091 dd_dev_info(dd, "LCB Error: %s\n", in handle_lcb_err()
8098 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source) in is_dc_int() argument
8103 interrupt_clear_down(dd, 0, eri); in is_dc_int()
8114 dd_dev_err(dd, "Parity error in DC LBM block\n"); in is_dc_int()
8116 dd_dev_err(dd, "Invalid DC interrupt %u\n", source); in is_dc_int()
8123 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source) in is_send_credit_int() argument
8125 sc_group_release_update(dd, source); in is_send_credit_int()
8137 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source) in is_sdma_eng_int() argument
8145 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which, in is_sdma_eng_int()
8147 sdma_dumpstate(&dd->per_sdma[which]); in is_sdma_eng_int()
8150 if (likely(what < 3 && which < dd->num_sdma)) { in is_sdma_eng_int()
8151 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source); in is_sdma_eng_int()
8154 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source); in is_sdma_eng_int()
8168 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source) in is_rcv_avail_int() argument
8173 if (likely(source < dd->num_rcv_contexts)) { in is_rcv_avail_int()
8174 rcd = hfi1_rcd_get_by_index(dd, source); in is_rcv_avail_int()
8186 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n", in is_rcv_avail_int()
8199 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source) in is_rcv_urgent_int() argument
8204 if (likely(source < dd->num_rcv_contexts)) { in is_rcv_urgent_int()
8205 rcd = hfi1_rcd_get_by_index(dd, source); in is_rcv_urgent_int()
8217 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n", in is_rcv_urgent_int()
8224 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source) in is_reserved_int() argument
8228 dd_dev_err(dd, "unexpected %s interrupt\n", in is_reserved_int()
8263 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source) in is_interrupt() argument
8270 trace_hfi1_interrupt(dd, entry, source); in is_interrupt()
8271 entry->is_int(dd, source - entry->start); in is_interrupt()
8276 dd_dev_err(dd, "invalid interrupt source %u\n", source); in is_interrupt()
8290 struct hfi1_devdata *dd = data; in general_interrupt() local
8296 this_cpu_inc(*dd->int_counter); in general_interrupt()
8300 if (dd->gi_mask[i] == 0) { in general_interrupt()
8304 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) & in general_interrupt()
8305 dd->gi_mask[i]; in general_interrupt()
8308 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]); in general_interrupt()
8314 is_interrupt(dd, bit); in general_interrupt()
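general_interrupt() above snapshots each CCE_INT_STATUS word under its software mask (gi_mask), acks exactly the snapshot via CCE_INT_CLEAR, then walks the set bits and dispatches each source through is_interrupt(). The skeleton of that loop; helpers, offsets, and the array size are assumed, and the bitmap cast presumes a 64-bit kernel:

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define NUM_INT_CSRS   12    /* assumed number of 64-bit status words */
    #define CCE_INT_STATUS 0x00  /* stand-in offsets */
    #define CCE_INT_CLEAR  0x60

    struct mydev { u64 gi_mask[NUM_INT_CSRS]; };

    static u64 rd(struct mydev *dev, u32 off);
    static void wr(struct mydev *dev, u32 off, u64 val);
    static void handle_source(struct mydev *dev, unsigned int src);

    static void dispatch_all(struct mydev *dev)
    {
        u64 regs[NUM_INT_CSRS];
        unsigned int bit;
        int i;

        for (i = 0; i < NUM_INT_CSRS; i++) {
            regs[i] = rd(dev, CCE_INT_STATUS + 8 * i) & dev->gi_mask[i];
            if (regs[i])
                wr(dev, CCE_INT_CLEAR + 8 * i, regs[i]);  /* ack snapshot */
        }
        for_each_set_bit(bit, (unsigned long *)regs, NUM_INT_CSRS * 64)
            handle_source(dev, bit);  /* stand-in for is_interrupt() */
    }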
8324 struct hfi1_devdata *dd = sde->dd; in sdma_interrupt() local
8328 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in sdma_interrupt()
8333 this_cpu_inc(*dd->int_counter); in sdma_interrupt()
8336 status = read_csr(dd, in sdma_interrupt()
8341 write_csr(dd, in sdma_interrupt()
8348 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n", in sdma_interrupt()
8361 struct hfi1_devdata *dd = rcd->dd; in clear_recv_intr() local
8364 write_csr(dd, addr, rcd->imask); in clear_recv_intr()
8366 (void)read_csr(dd, addr); in clear_recv_intr()
8372 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask); in force_recv_intr()
8393 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in check_packet_present()
8404 struct hfi1_devdata *dd = rcd->dd; in receive_interrupt_common() local
8406 trace_hfi1_receive_interrupt(dd, rcd); in receive_interrupt_common()
8407 this_cpu_inc(*dd->int_counter); in receive_interrupt_common()
8539 u32 read_physical_state(struct hfi1_devdata *dd) in read_physical_state() argument
8543 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE); in read_physical_state()
8548 u32 read_logical_state(struct hfi1_devdata *dd) in read_logical_state() argument
8552 reg = read_csr(dd, DCC_CFG_PORT_CONFIG); in read_logical_state()
8557 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate) in set_logical_state() argument
8561 reg = read_csr(dd, DCC_CFG_PORT_CONFIG); in set_logical_state()
8565 write_csr(dd, DCC_CFG_PORT_CONFIG, reg); in set_logical_state()
8571 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data) in read_lcb_via_8051() argument
8576 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { in read_lcb_via_8051()
8577 if (acquire_lcb_access(dd, 0) == 0) { in read_lcb_via_8051()
8578 *data = read_csr(dd, addr); in read_lcb_via_8051()
8579 release_lcb_access(dd, 0); in read_lcb_via_8051()
8587 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data); in read_lcb_via_8051()
8609 static void update_lcb_cache(struct hfi1_devdata *dd) in update_lcb_cache() argument
8616 ret = read_lcb_csr(dd, lcb_cache[i].off, &val); in update_lcb_cache()
8643 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data) in read_lcb_csr() argument
8645 struct hfi1_pportdata *ppd = dd->pport; in read_lcb_csr()
8649 return read_lcb_via_8051(dd, addr, data); in read_lcb_csr()
8658 *data = read_csr(dd, addr); in read_lcb_csr()
8665 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data) in write_lcb_via_8051() argument
8670 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || in write_lcb_via_8051()
8671 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) { in write_lcb_via_8051()
8672 if (acquire_lcb_access(dd, 0) == 0) { in write_lcb_via_8051()
8673 write_csr(dd, addr, data); in write_lcb_via_8051()
8674 release_lcb_access(dd, 0); in write_lcb_via_8051()
8682 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data); in write_lcb_via_8051()
8692 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data) in write_lcb_csr() argument
8694 struct hfi1_pportdata *ppd = dd->pport; in write_lcb_csr()
8698 return write_lcb_via_8051(dd, addr, data); in write_lcb_csr()
8703 write_csr(dd, addr, data); in write_lcb_csr()
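read_lcb_csr()/write_lcb_csr() above route each access: direct MMIO while the host owns the LCB (link up), otherwise a tunneled 8051 command via read/write_lcb_via_8051(). The routing step in isolation, with stand-in helpers:

    #include <linux/types.h>

    struct mydev;
    static bool host_owns_lcb(struct mydev *dev);   /* stand-ins */
    static int read_via_firmware(struct mydev *dev, u32 addr, u64 *data);
    static int write_via_firmware(struct mydev *dev, u32 addr, u64 data);
    static u64 rd(struct mydev *dev, u32 off);
    static void wr(struct mydev *dev, u32 off, u64 val);

    static int lcb_read(struct mydev *dev, u32 addr, u64 *data)
    {
        if (!host_owns_lcb(dev))
            return read_via_firmware(dev, addr, data);
        *data = rd(dev, addr);
        return 0;
    }

    static int lcb_write(struct mydev *dev, u32 addr, u64 data)
    {
        if (!host_owns_lcb(dev))
            return write_via_firmware(dev, addr, data);
        wr(dev, addr, data);
        return 0;
    }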
8712 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data, in do_8051_command() argument
8721 mutex_lock(&dd->dc8051_lock); in do_8051_command()
8724 if (dd->dc_shutdown) { in do_8051_command()
8739 if (dd->dc8051_timed_out) { in do_8051_command()
8740 if (dd->dc8051_timed_out > 1) { in do_8051_command()
8741 dd_dev_err(dd, in do_8051_command()
8747 _dc_shutdown(dd); in do_8051_command()
8748 _dc_start(dd); in do_8051_command()
8770 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0); in do_8051_command()
8776 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg); in do_8051_command()
8787 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg); in do_8051_command()
8789 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg); in do_8051_command()
8794 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1); in do_8051_command()
8799 dd->dc8051_timed_out++; in do_8051_command()
8800 dd_dev_err(dd, "8051 host command %u timeout\n", type); in do_8051_command()
8814 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1) in do_8051_command()
8822 dd->dc8051_timed_out = 0; in do_8051_command()
8826 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0); in do_8051_command()
8829 mutex_unlock(&dd->dc8051_lock); in do_8051_command()
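do_8051_command() above is the firmware mailbox: one command in flight under dc8051_lock, a restart of the 8051 after a prior timeout, a write of type plus inline data, a poll for completion, result extraction, and a final write of 0 so the next command produces a fresh enable edge. A condensed sketch of the handshake; every register name, shift, mask, and value below is illustrative, not the chip's real layout:

    #include <linux/mutex.h>
    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/bits.h>
    #include <linux/errno.h>

    #define CMD_REG      0x00              /* stand-in offsets */
    #define STATUS_REG   0x08
    #define CMD_ENABLE   BIT_ULL(0)
    #define TYPE_SHIFT   8
    #define DATA_SHIFT   16
    #define CMD_DONE     BIT_ULL(0)
    #define RC_SHIFT     8
    #define RC_MASK      (0xffull << RC_SHIFT)
    #define RSP_SHIFT    16
    #define RSP_MASK     0xffffull
    #define CMD_SUCCESS  2                 /* assumed success code */

    struct mydev { struct mutex fw_lock; };

    static u64 rd(struct mydev *dev, u32 off);
    static void wr(struct mydev *dev, u32 off, u64 val);

    static int fw_command(struct mydev *dev, u32 type, u64 in, u64 *out)
    {
        unsigned long deadline;
        u64 reg;
        int ret;

        mutex_lock(&dev->fw_lock);         /* one command in flight */
        wr(dev, CMD_REG, CMD_ENABLE | ((u64)type << TYPE_SHIFT) |
                         (in << DATA_SHIFT));
        deadline = jiffies + msecs_to_jiffies(1000);  /* assumed timeout */
        for (;;) {
            reg = rd(dev, STATUS_REG);
            if (reg & CMD_DONE)
                break;
            if (time_after(jiffies, deadline)) {
                ret = -ETIMEDOUT;
                goto out;
            }
            usleep_range(100, 200);
        }
        if (out)
            *out = (reg >> RSP_SHIFT) & RSP_MASK;
        ret = ((reg & RC_MASK) >> RC_SHIFT) == CMD_SUCCESS ? 0 : -EIO;
    out:
        wr(dev, CMD_REG, 0);   /* clear so the next enable is a fresh edge */
        mutex_unlock(&dev->fw_lock);
        return ret;
    }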
8833 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state) in set_physical_link_state() argument
8835 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL); in set_physical_link_state()
8838 int load_8051_config(struct hfi1_devdata *dd, u8 field_id, in load_8051_config() argument
8847 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL); in load_8051_config()
8849 dd_dev_err(dd, in load_8051_config()
8861 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id, in read_8051_config() argument
8877 ret = read_8051_data(dd, addr, 8, &big_data); in read_8051_config()
8887 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n", in read_8051_config()
8894 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management, in write_vc_local_phy() argument
8901 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY, in write_vc_local_phy()
8905 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu, in write_vc_local_fabric() argument
8915 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC, in write_vc_local_fabric()
8919 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits, in read_vc_local_link_mode() argument
8924 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, in read_vc_local_link_mode()
8931 static int write_vc_local_link_mode(struct hfi1_devdata *dd, in write_vc_local_link_mode() argument
8941 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, in write_vc_local_link_mode()
8945 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id, in write_local_device_id() argument
8952 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame); in write_local_device_id()
8955 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id, in read_remote_device_id() argument
8960 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame); in read_remote_device_id()
8966 int write_host_interface_version(struct hfi1_devdata *dd, u8 version) in write_host_interface_version() argument
8972 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame); in write_host_interface_version()
8976 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, in write_host_interface_version()
8980 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor, in read_misc_status() argument
8985 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame); in read_misc_status()
8991 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame); in read_misc_status()
8996 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management, in read_vc_remote_phy() argument
9001 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame); in read_vc_remote_phy()
9008 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z, in read_vc_remote_fabric() argument
9013 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame); in read_vc_remote_fabric()
9021 static void read_vc_remote_link_width(struct hfi1_devdata *dd, in read_vc_remote_link_width() argument
9027 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG, in read_vc_remote_link_width()
9034 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx) in read_local_lni() argument
9038 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame); in read_local_lni()
9042 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls) in read_last_local_state() argument
9044 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls); in read_last_local_state()
9047 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs) in read_last_remote_state() argument
9049 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs); in read_last_remote_state()
9052 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality) in hfi1_read_link_quality() argument
9058 if (dd->pport->host_link_state & HLS_UP) { in hfi1_read_link_quality()
9059 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, in hfi1_read_link_quality()
9067 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc) in read_planned_down_reason_code() argument
9071 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame); in read_planned_down_reason_code()
9075 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr) in read_link_down_reason() argument
9079 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame); in read_link_down_reason()
9083 static int read_tx_settings(struct hfi1_devdata *dd, in read_tx_settings() argument
9092 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame); in read_tx_settings()
9103 static int write_tx_settings(struct hfi1_devdata *dd, in write_tx_settings() argument
9116 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame); in write_tx_settings()
9124 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out) in read_idle_message() argument
9128 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out); in read_idle_message()
9130 dd_dev_err(dd, "read idle message: type %d, err %d\n", in read_idle_message()
9134 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out); in read_idle_message()
9146 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data) in read_idle_sma() argument
9148 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, in read_idle_sma()
9157 static int send_idle_message(struct hfi1_devdata *dd, u64 data) in send_idle_message() argument
9161 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data); in send_idle_message()
9162 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL); in send_idle_message()
9164 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n", in send_idle_message()
9176 int send_idle_sma(struct hfi1_devdata *dd, u64 message) in send_idle_sma() argument
9182 return send_idle_message(dd, data); in send_idle_sma()
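read_idle_sma() and send_idle_sma() above wrap the generic idle-message helpers by shifting the message type into a type field of one 64-bit word ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT is visible in the listing). A small framing sketch; the payload placement is an assumption:

    #include <linux/types.h>

    #define IDLE_MSG_TYPE_SHIFT 0  /* illustrative positions */
    #define IDLE_PAYLOAD_SHIFT  8

    /* Frame an idle message: type in the low field, payload above it. */
    static u64 frame_idle_msg(u64 type, u64 payload)
    {
        return (type << IDLE_MSG_TYPE_SHIFT) |
               (payload << IDLE_PAYLOAD_SHIFT);
    }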
9191 static int do_quick_linkup(struct hfi1_devdata *dd) in do_quick_linkup() argument
9195 lcb_shutdown(dd, 0); in do_quick_linkup()
9200 write_csr(dd, DC_LCB_CFG_LOOPBACK, in do_quick_linkup()
9202 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); in do_quick_linkup()
9207 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); in do_quick_linkup()
9210 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { in do_quick_linkup()
9212 write_csr(dd, DC_LCB_CFG_RUN, in do_quick_linkup()
9215 ret = wait_link_transfer_active(dd, 10); in do_quick_linkup()
9219 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, in do_quick_linkup()
9231 dd_dev_err(dd, in do_quick_linkup()
9234 dd_dev_err(dd, "Continuing with quick linkup\n"); in do_quick_linkup()
9237 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ in do_quick_linkup()
9238 set_8051_lcb_access(dd); in do_quick_linkup()
9245 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP); in do_quick_linkup()
9247 dd_dev_err(dd, in do_quick_linkup()
9251 set_host_lcb_access(dd); in do_quick_linkup()
9252 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ in do_quick_linkup()
9265 static int init_loopback(struct hfi1_devdata *dd) in init_loopback() argument
9267 dd_dev_info(dd, "Entering loopback mode\n"); in init_loopback()
9270 write_csr(dd, DC_DC8051_CFG_MODE, in init_loopback()
9271 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK)); in init_loopback()
9279 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) && in init_loopback()
9298 if (dd->icode == ICODE_FPGA_EMULATION) { in init_loopback()
9299 dd_dev_err(dd, in init_loopback()
9310 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback); in init_loopback()
9345 struct hfi1_devdata *dd = ppd->dd; in set_local_link_attributes() local
9352 fabric_serdes_reset(dd); in set_local_link_attributes()
9355 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, in set_local_link_attributes()
9360 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { in set_local_link_attributes()
9376 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion, in set_local_link_attributes()
9381 ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION); in set_local_link_attributes()
9383 dd_dev_err(dd, in set_local_link_attributes()
9392 ret = write_vc_local_phy(dd, in set_local_link_attributes()
9399 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init, in set_local_link_attributes()
9416 if (dd->dc8051_ver >= dc8051_ver(1, 25, 0)) in set_local_link_attributes()
9419 ret = write_vc_local_link_mode(dd, misc_bits, 0, in set_local_link_attributes()
9426 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev); in set_local_link_attributes()
9431 dd_dev_err(dd, in set_local_link_attributes()
9451 dd_dev_info(ppd->dd, in start_link()
9469 struct hfi1_devdata *dd = ppd->dd; in wait_for_qsfp_init() local
9488 mask = read_csr(dd, dd->hfi1_id ? in wait_for_qsfp_init()
9493 dd_dev_info(dd, "%s: No IntN detected, reset complete\n", in wait_for_qsfp_init()
9503 struct hfi1_devdata *dd = ppd->dd; in set_qsfp_int_n() local
9506 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK); in set_qsfp_int_n()
9512 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, in set_qsfp_int_n()
9518 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); in set_qsfp_int_n()
9523 struct hfi1_devdata *dd = ppd->dd; in reset_qsfp() local
9532 qsfp_mask = read_csr(dd, in reset_qsfp()
9533 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); in reset_qsfp()
9535 write_csr(dd, in reset_qsfp()
9536 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); in reset_qsfp()
9541 write_csr(dd, in reset_qsfp()
9542 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); in reset_qsfp()
9563 struct hfi1_devdata *dd = ppd->dd; in handle_qsfp_error_conditions() local
9567 dd_dev_err(dd, "%s: QSFP cable temperature too high\n", in handle_qsfp_error_conditions()
9572 dd_dev_err(dd, "%s: QSFP cable temperature too low\n", in handle_qsfp_error_conditions()
9583 dd_dev_err(dd, "%s: QSFP supply voltage too high\n", in handle_qsfp_error_conditions()
9588 dd_dev_err(dd, "%s: QSFP supply voltage too low\n", in handle_qsfp_error_conditions()
9595 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n", in handle_qsfp_error_conditions()
9600 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n", in handle_qsfp_error_conditions()
9605 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n", in handle_qsfp_error_conditions()
9610 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n", in handle_qsfp_error_conditions()
9615 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n", in handle_qsfp_error_conditions()
9620 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n", in handle_qsfp_error_conditions()
9625 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n", in handle_qsfp_error_conditions()
9630 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n", in handle_qsfp_error_conditions()
9635 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n", in handle_qsfp_error_conditions()
9640 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n", in handle_qsfp_error_conditions()
9645 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n", in handle_qsfp_error_conditions()
9650 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n", in handle_qsfp_error_conditions()
9664 struct hfi1_devdata *dd; in qsfp_event() local
9668 dd = ppd->dd; in qsfp_event()
9675 dd_dev_info(ppd->dd, in qsfp_event()
9685 dc_start(dd); in qsfp_event()
9704 if (one_qsfp_read(ppd, dd->hfi1_id, 6, in qsfp_event()
9706 dd_dev_info(dd, in qsfp_event()
9722 void init_qsfp_int(struct hfi1_devdata *dd) in init_qsfp_int() argument
9724 struct hfi1_pportdata *ppd = dd->pport; in init_qsfp_int()
9729 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, in init_qsfp_int()
9731 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, in init_qsfp_int()
9739 write_csr(dd, in init_qsfp_int()
9740 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, in init_qsfp_int()
9744 if (!dd->hfi1_id) in init_qsfp_int()
9745 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true); in init_qsfp_int()
9747 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true); in init_qsfp_int()
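init_qsfp_int() and its neighbors above repeat the dd->hfi1_id ? ASIC_QSFP2_* : ASIC_QSFP1_* selection for every QSFP CSR, since each of the two HFIs sharing an ASIC owns one module. One way to capture the selection; the helper is hypothetical, not driver API:

    #include <linux/types.h>

    struct mydev { u32 hfi1_id; };

    /* Pick the QSFP CSR belonging to this HFI: id 0 owns QSFP1,
     * id 1 owns QSFP2, per the pattern above. */
    static inline u32 qsfp_csr(const struct mydev *dev,
                               u32 qsfp1_reg, u32 qsfp2_reg)
    {
        return dev->hfi1_id ? qsfp2_reg : qsfp1_reg;
    }

    /* usage: wr(dev, qsfp_csr(dev, ASIC_QSFP1_MASK, ASIC_QSFP2_MASK), mask); */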
9753 static void init_lcb(struct hfi1_devdata *dd) in init_lcb() argument
9756 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) in init_lcb()
9762 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01); in init_lcb()
9763 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00); in init_lcb()
9764 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00); in init_lcb()
9765 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); in init_lcb()
9766 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08); in init_lcb()
9767 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02); in init_lcb()
9768 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00); in init_lcb()
9788 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1); in test_qsfp_read()
9815 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n"); in try_start_link()
9818 dd_dev_info(ppd->dd, in try_start_link()
9843 struct hfi1_devdata *dd = ppd->dd; in bringup_serdes() local
9848 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK); in bringup_serdes()
9852 if (dd->base_guid) in bringup_serdes()
9853 guid = dd->base_guid + ppd->port - 1; in bringup_serdes()
9861 init_lcb(dd); in bringup_serdes()
9864 ret = init_loopback(dd); in bringup_serdes()
9882 struct hfi1_devdata *dd = ppd->dd; in hfi1_quiet_serdes() local
9905 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in hfi1_quiet_serdes()
9909 static inline int init_cpu_counters(struct hfi1_devdata *dd) in init_cpu_counters() argument
9914 ppd = (struct hfi1_pportdata *)(dd + 1); in init_cpu_counters()
9915 for (i = 0; i < dd->num_pports; i++, ppd++) { in init_cpu_counters()
9933 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, in hfi1_put_tid() argument
9938 if (!(dd->flags & HFI1_PRESENT)) in hfi1_put_tid()
9945 dd_dev_err(dd, in hfi1_put_tid()
9950 trace_hfi1_put_tid(dd, index, type, pa, order); in hfi1_put_tid()
9957 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg); in hfi1_put_tid()
9958 writeq(reg, dd->rcvarray_wc + (index * 8)); in hfi1_put_tid()
9973 struct hfi1_devdata *dd = rcd->dd; in hfi1_clear_tids() local
9979 hfi1_put_tid(dd, i, PT_INVALID, 0, 0); in hfi1_clear_tids()
9983 hfi1_put_tid(dd, i, PT_INVALID, 0, 0); in hfi1_clear_tids()
10020 struct hfi1_devdata *dd = ppd->dd; in hfi1_get_ib_cfg() local
10067 dd, in hfi1_get_ib_cfg()
10091 u32 lrh_max_header_bytes(struct hfi1_devdata *dd) in lrh_max_header_bytes() argument
10103 return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; in lrh_max_header_bytes()
10119 struct hfi1_devdata *dd = ppd->dd; in set_send_length() local
10120 u32 max_hb = lrh_max_header_bytes(dd), dcmtu; in set_send_length()
10121 u32 maxvlmtu = dd->vld[15].mtu; in set_send_length()
10122 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) in set_send_length()
10129 if (dd->vld[i].mtu > maxvlmtu) in set_send_length()
10130 maxvlmtu = dd->vld[i].mtu; in set_send_length()
10132 len1 |= (((dd->vld[i].mtu + max_hb) >> 2) in set_send_length()
10136 len2 |= (((dd->vld[i].mtu + max_hb) >> 2) in set_send_length()
10140 write_csr(dd, SEND_LEN_CHECK0, len1); in set_send_length()
10141 write_csr(dd, SEND_LEN_CHECK1, len2); in set_send_length()
10145 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), in set_send_length()
10146 sc_mtu_to_threshold(dd->vld[i].sc, in set_send_length()
10147 dd->vld[i].mtu, in set_send_length()
10148 get_hdrqentsize(dd->rcd[0]))); in set_send_length()
10151 pio_select_send_context_vl(dd, j, i), in set_send_length()
10154 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), in set_send_length()
10155 sc_mtu_to_threshold(dd->vld[15].sc, in set_send_length()
10156 dd->vld[15].mtu, in set_send_length()
10157 dd->rcd[0]->rcvhdrqentsize)); in set_send_length()
10158 sc_set_cr_threshold(dd->vld[15].sc, thres); in set_send_length()
10163 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG); in set_send_length()
10167 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1); in set_send_length()
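set_send_length() above computes, for every VL, the largest legal send length in dwords ((mtu + max_hb) >> 2) and packs each VL's limit into its own bitfield across the two SEND_LEN_CHECK CSRs. A packing sketch with an assumed field width:

    #include <stdint.h>

    #define LEN_FIELD_BITS 12  /* assumed bits per VL field */

    /* Pack per-VL dword length limits into one length-check word. */
    static uint64_t pack_vl_len_limits(const uint32_t *mtu, uint32_t max_hb,
                                       int first_vl, int nvls)
    {
        uint64_t word = 0;
        int i;

        for (i = 0; i < nvls; i++)
            word |= (uint64_t)((mtu[first_vl + i] + max_hb) >> 2)
                    << (i * LEN_FIELD_BITS);
        return word;
    }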
10174 struct hfi1_devdata *dd = ppd->dd; in set_lidlmc() local
10176 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); in set_lidlmc()
10190 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1); in set_lidlmc()
10200 for (i = 0; i < chip_send_contexts(dd); i++) { in set_lidlmc()
10203 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg); in set_lidlmc()
10207 sdma_update_lmc(dd, mask, lid); in set_lidlmc()
10284 struct hfi1_devdata *dd = ppd->dd; in decode_state_complete() local
10303 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n", in decode_state_complete()
10305 dd_dev_err(dd, " last reported state state: %s (0x%x)\n", in decode_state_complete()
10307 dd_dev_err(dd, " state successfully completed: %s\n", in decode_state_complete()
10309 dd_dev_err(dd, " fail reason 0x%x: %s\n", in decode_state_complete()
10311 dd_dev_err(dd, " passing lane mask: 0x%x", lanes); in decode_state_complete()
10324 read_last_local_state(ppd->dd, &last_local_state); in check_lni_states()
10325 read_last_remote_state(ppd->dd, &last_remote_state); in check_lni_states()
10340 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms) in wait_link_transfer_active() argument
10348 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE); in wait_link_transfer_active()
10352 dd_dev_err(dd, in wait_link_transfer_active()
10364 struct hfi1_devdata *dd = ppd->dd; in force_logical_link_state_down() local
10369 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); in force_logical_link_state_down()
10370 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, in force_logical_link_state_down()
10373 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); in force_logical_link_state_down()
10374 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0); in force_logical_link_state_down()
10375 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); in force_logical_link_state_down()
10376 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2); in force_logical_link_state_down()
10378 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); in force_logical_link_state_down()
10379 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET); in force_logical_link_state_down()
10381 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1); in force_logical_link_state_down()
10382 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT); in force_logical_link_state_down()
10384 wait_link_transfer_active(dd, 100); in force_logical_link_state_down()
10389 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); in force_logical_link_state_down()
10390 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0); in force_logical_link_state_down()
10391 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0); in force_logical_link_state_down()
10393 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n"); in force_logical_link_state_down()
10406 struct hfi1_devdata *dd = ppd->dd; in goto_offline() local
10411 update_lcb_cache(dd); in goto_offline()
10417 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE); in goto_offline()
10420 dd_dev_err(dd, in goto_offline()
10440 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT); in goto_offline()
10443 release_chip_resource(dd, qsfp_resource(dd)); in goto_offline()
10446 dd_dev_err(dd, in goto_offline()
10465 set_host_lcb_access(dd); in goto_offline()
10466 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ in goto_offline()
10485 ret = wait_fm_ready(dd, 7000); in goto_offline()
10487 dd_dev_err(dd, in goto_offline()
10503 handle_linkup_change(dd, 0); in goto_offline()
10594 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", in driver_pstate()
10618 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", in driver_lstate()
10651 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i)); in data_vls_operational()
10652 if ((reg && !ppd->dd->vld[i].mtu) || in data_vls_operational()
10653 (!reg && ppd->dd->vld[i].mtu)) in data_vls_operational()
10670 struct hfi1_devdata *dd = ppd->dd; in set_link_state() local
10685 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__, in set_link_state()
10709 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { in set_link_state()
10729 dd_dev_err(dd, in set_link_state()
10737 dd_dev_err(dd, in set_link_state()
10749 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in set_link_state()
10751 handle_linkup_change(dd, 1); in set_link_state()
10752 pio_kernel_linkup(dd); in set_link_state()
10769 dd_dev_err(dd, in set_link_state()
10776 set_logical_state(dd, LSTATE_ARMED); in set_link_state()
10779 dd_dev_err(dd, in set_link_state()
10791 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) in set_link_state()
10798 set_logical_state(dd, LSTATE_ACTIVE); in set_link_state()
10801 dd_dev_err(dd, in set_link_state()
10806 sdma_all_running(dd); in set_link_state()
10811 event.device = &dd->verbs_dev.rdi.ibdev; in set_link_state()
10819 dd->dc_shutdown) in set_link_state()
10820 dc_start(dd); in set_link_state()
10822 write_csr(dd, DCC_CFG_LED_CNTRL, 0); in set_link_state()
10838 set_all_slowpath(ppd->dd); in set_link_state()
10847 ret = do_quick_linkup(dd); in set_link_state()
10849 ret1 = set_physical_link_state(dd, PLS_POLLING); in set_link_state()
10854 dd_dev_err(dd, in set_link_state()
10893 if (!dd->dc_shutdown) { in set_link_state()
10894 ret1 = set_physical_link_state(dd, PLS_DISABLED); in set_link_state()
10896 dd_dev_err(dd, in set_link_state()
10904 dd_dev_err(dd, in set_link_state()
10909 dc_shutdown(dd); in set_link_state()
10915 dc_start(dd); in set_link_state()
10932 ret1 = set_physical_link_state(dd, PLS_LINKUP); in set_link_state()
10934 dd_dev_err(dd, in set_link_state()
10946 dd_dev_info(dd, "%s: state 0x%x: not supported\n", in set_link_state()
10955 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n", in set_link_state()
10986 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg); in hfi1_set_ib_cfg()
11044 dd_dev_info(ppd->dd, in hfi1_set_ib_cfg()
11119 struct hfi1_devdata *dd = ppd->dd; in set_vl_weights() local
11129 drain = !is_ax(dd) && is_up; in set_vl_weights()
11138 ret = stop_drain_data_vls(dd); in set_vl_weights()
11142 dd, in set_vl_weights()
11158 write_csr(dd, target + (i * 8), reg); in set_vl_weights()
11160 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE); in set_vl_weights()
11163 open_fill_data_vls(dd); /* reopen all VLs */ in set_vl_weights()
11174 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr, in read_one_cm_vl() argument
11177 u64 reg = read_csr(dd, csr); in read_one_cm_vl()
11190 static int get_buffer_control(struct hfi1_devdata *dd, in get_buffer_control() argument
11201 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]); in get_buffer_control()
11204 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]); in get_buffer_control()
11206 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); in get_buffer_control()
11217 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp) in get_sc2vlnt() argument
11223 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0); in get_sc2vlnt()
11231 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16); in get_sc2vlnt()
11241 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems, in get_vlarb_preempt() argument
11252 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp) in set_sc2vlnt() argument
11254 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, in set_sc2vlnt()
11272 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, in set_sc2vlnt()
11292 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what, in nonzero_msg() argument
11296 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n", in nonzero_msg()
11301 static void set_global_shared(struct hfi1_devdata *dd, u16 limit) in set_global_shared() argument
11305 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); in set_global_shared()
11308 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); in set_global_shared()
11312 static void set_global_limit(struct hfi1_devdata *dd, u16 limit) in set_global_limit() argument
11316 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); in set_global_limit()
11319 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); in set_global_limit()
11323 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit) in set_vl_shared() argument
11333 reg = read_csr(dd, addr); in set_vl_shared()
11336 write_csr(dd, addr, reg); in set_vl_shared()
11340 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit) in set_vl_dedicated() argument
11350 reg = read_csr(dd, addr); in set_vl_dedicated()
11353 write_csr(dd, addr, reg); in set_vl_dedicated()
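
set_vl_shared() and set_vl_dedicated() above share one shape: read the per-VL credit CSR, clear a single field, merge in the new limit, and write the result back. A sketch of that field update over a plain 64-bit word, with hypothetical shift/mask values standing in for the real SEND_CM_CREDIT_VL layout:

    #include <stdint.h>

    /* Hypothetical field geometry; the driver takes the real values
     * from its register definitions. */
    #define SHARED_SHIFT     0
    #define SHARED_MASK      0xffffull
    #define DEDICATED_SHIFT  16
    #define DEDICATED_MASK   0xffffull

    static uint64_t set_limit(uint64_t reg, uint64_t limit,
                              unsigned shift, uint64_t mask)
    {
        reg &= ~(mask << shift);         /* clear the old limit */
        reg |= (limit & mask) << shift;  /* merge in the new one */
        return reg;
    }

In the driver, read_csr() and write_csr() bracket exactly this merge.
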
11357 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask, in wait_for_vl_status_clear() argument
11365 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask; in wait_for_vl_status_clear()
11374 dd_dev_err(dd, in wait_for_vl_status_clear()
11381 dd_dev_err(dd, in wait_for_vl_status_clear()
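
wait_for_vl_status_clear() polls SEND_CM_CREDIT_USED_STATUS until the watched bits drop, and logs an error rather than failing hard when the wait expires. A self-contained sketch of that poll-with-budget pattern, using a caller-supplied read function in place of read_csr():

    #include <stdint.h>
    #include <stdio.h>

    /* Poll until (read_status() & mask) == 0 or the retry budget runs
     * out.  The driver bounds the wait by time and sleeps between
     * reads; a retry count keeps this sketch portable. */
    static int wait_status_clear(uint64_t (*read_status)(void),
                                 uint64_t mask, int retries)
    {
        while (retries-- > 0) {
            if ((read_status() & mask) == 0)
                return 0;                /* drained */
        }
        fprintf(stderr, "status bits 0x%llx stuck\n",
                (unsigned long long)mask);
        return -1;                       /* caller proceeds anyway */
    }
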
11412 struct hfi1_devdata *dd = ppd->dd; in set_buffer_control() local
11448 nonzero_msg(dd, i, "dedicated", in set_buffer_control()
11450 nonzero_msg(dd, i, "shared", in set_buffer_control()
11458 get_buffer_control(dd, &cur_bc, &cur_total); in set_buffer_control()
11497 set_global_limit(dd, new_total); in set_buffer_control()
11505 (is_ax(dd) && any_shared_limit_changing)) { in set_buffer_control()
11506 set_global_shared(dd, 0); in set_buffer_control()
11516 set_vl_shared(dd, i, 0); in set_buffer_control()
11521 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask, in set_buffer_control()
11530 set_vl_dedicated(dd, i, in set_buffer_control()
11538 wait_for_vl_status_clear(dd, ld_mask, "dedicated"); in set_buffer_control()
11547 set_vl_dedicated(dd, i, in set_buffer_control()
11560 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared)); in set_buffer_control()
11566 set_global_shared(dd, in set_buffer_control()
11571 set_global_limit(dd, new_total); in set_buffer_control()
11583 ret = sdma_map_init(dd, ppd->port - 1, vl_count ? in set_buffer_control()
11588 ret = pio_map_init(dd, ppd->port - 1, vl_count ? in set_buffer_control()
11630 size = get_buffer_control(ppd->dd, t, NULL); in fm_get_table()
11633 size = get_sc2vlnt(ppd->dd, t); in fm_get_table()
11638 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t); in fm_get_table()
11688 set_sc2vlnt(ppd->dd, t); in fm_set_table()
11701 static int disable_data_vls(struct hfi1_devdata *dd) in disable_data_vls() argument
11703 if (is_ax(dd)) in disable_data_vls()
11706 pio_send_control(dd, PSC_DATA_VL_DISABLE); in disable_data_vls()
11719 int open_fill_data_vls(struct hfi1_devdata *dd) in open_fill_data_vls() argument
11721 if (is_ax(dd)) in open_fill_data_vls()
11724 pio_send_control(dd, PSC_DATA_VL_ENABLE); in open_fill_data_vls()
11734 static void drain_data_vls(struct hfi1_devdata *dd) in drain_data_vls() argument
11736 sc_wait(dd); in drain_data_vls()
11737 sdma_wait(dd); in drain_data_vls()
11738 pause_for_credit_return(dd); in drain_data_vls()
11751 int stop_drain_data_vls(struct hfi1_devdata *dd) in stop_drain_data_vls() argument
11755 ret = disable_data_vls(dd); in stop_drain_data_vls()
11757 drain_data_vls(dd); in stop_drain_data_vls()
11766 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns) in ns_to_cclock() argument
11770 if (dd->icode == ICODE_FPGA_EMULATION) in ns_to_cclock()
11783 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks) in cclock_to_ns() argument
11787 if (dd->icode == ICODE_FPGA_EMULATION) in cclock_to_ns()
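
ns_to_cclock() and cclock_to_ns() are plain unit conversions keyed off the chip clock period, which differs between FPGA emulation and silicon (hence the dd->icode checks above). A sketch of the arithmetic, with the picosecond periods as assumed placeholder values:

    #include <stdint.h>

    #define ASIC_PERIOD_PS  805     /* assumed value for illustration */
    #define FPGA_PERIOD_PS  30300   /* assumed value for illustration */

    static uint32_t ns_to_cclock(uint32_t ns, int fpga)
    {
        uint32_t ps = fpga ? FPGA_PERIOD_PS : ASIC_PERIOD_PS;
        return (uint32_t)(((uint64_t)ns * 1000) / ps);
    }

    static uint32_t cclock_to_ns(uint32_t cclocks, int fpga)
    {
        uint32_t ps = fpga ? FPGA_PERIOD_PS : ASIC_PERIOD_PS;
        return (uint32_t)(((uint64_t)cclocks * ps) / 1000);
    }
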
11804 struct hfi1_devdata *dd = rcd->dd; in adjust_rcv_timeout() local
11829 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */ in adjust_rcv_timeout()
11831 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr); in adjust_rcv_timeout()
11839 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, in adjust_rcv_timeout()
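
adjust_rcv_timeout() grows the interrupt coalescing timeout multiplicatively and clamps it at the CSR maximum, as the min(timeout << 1, ...) fragment above shows. A sketch of that clamped doubling (the shrink direction here is an assumption, shown only for symmetry):

    #include <stdint.h>

    static uint32_t adjust_timeout(uint32_t cur, int busier, uint32_t max)
    {
        if (busier) {
            if (cur >= max)
                return max;          /* already at the ceiling */
            cur <<= 1;
            return cur < max ? cur : max;
        }
        cur >>= 1;                   /* assumed: halve when idle */
        return cur ? cur : 1;        /* never drop to zero */
    }
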
11847 struct hfi1_devdata *dd = rcd->dd; in update_usrhead() local
11860 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg); in update_usrhead()
11865 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); in update_usrhead()
11872 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) in hdrqempty()
11878 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in hdrqempty()
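
hdrqempty() declares the receive header queue empty when the masked head index equals the tail the hardware reports. A sketch of that comparison, with hypothetical shift/mask values for the head field:

    #include <stdbool.h>
    #include <stdint.h>

    /* RCV_HDR_HEAD carries more than the index, so the driver shifts
     * and masks before comparing; these field values are placeholders. */
    #define HEAD_SHIFT 0
    #define HEAD_MASK  0xffffull

    static bool hdrq_empty(uint64_t head_csr, uint64_t tail_csr)
    {
        uint64_t head = (head_csr >> HEAD_SHIFT) & HEAD_MASK;
        return head == tail_csr;     /* consumer caught the producer */
    }
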
11944 int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt) in hfi1_validate_rcvhdrcnt() argument
11947 dd_dev_err(dd, "Receive header queue count too small\n"); in hfi1_validate_rcvhdrcnt()
11952 dd_dev_err(dd, in hfi1_validate_rcvhdrcnt()
11959 dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n", in hfi1_validate_rcvhdrcnt()
11974 void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt) in set_hdrq_regs() argument
11980 write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg); in set_hdrq_regs()
11984 write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg); in set_hdrq_regs()
11987 write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg); in set_hdrq_regs()
11993 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, in set_hdrq_regs()
11994 dd->rcvhdrtail_dummy_dma); in set_hdrq_regs()
11997 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, in hfi1_rcvctrl() argument
12011 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL); in hfi1_rcvctrl()
12016 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, in hfi1_rcvctrl()
12019 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, in hfi1_rcvctrl()
12035 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr; in hfi1_rcvctrl()
12047 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0); in hfi1_rcvctrl()
12051 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0); in hfi1_rcvctrl()
12060 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg); in hfi1_rcvctrl()
12074 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg); in hfi1_rcvctrl()
12076 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT); in hfi1_rcvctrl()
12079 write_csr(dd, RCV_VL15, 0); in hfi1_rcvctrl()
12085 if (dd->rcvhdrtail_dummy_dma) { in hfi1_rcvctrl()
12086 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, in hfi1_rcvctrl()
12087 dd->rcvhdrtail_dummy_dma); in hfi1_rcvctrl()
12095 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, in hfi1_rcvctrl()
12100 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, in hfi1_rcvctrl()
12134 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, in hfi1_rcvctrl()
12137 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, in hfi1_rcvctrl()
12141 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl); in hfi1_rcvctrl()
12146 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); in hfi1_rcvctrl()
12148 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n", in hfi1_rcvctrl()
12150 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); in hfi1_rcvctrl()
12151 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10); in hfi1_rcvctrl()
12152 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00); in hfi1_rcvctrl()
12153 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); in hfi1_rcvctrl()
12154 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); in hfi1_rcvctrl()
12155 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n", in hfi1_rcvctrl()
12166 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT, in hfi1_rcvctrl()
12172 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); in hfi1_rcvctrl()
12181 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, in hfi1_rcvctrl()
12182 dd->rcvhdrtail_dummy_dma); in hfi1_rcvctrl()
12185 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp) in hfi1_read_cntrs() argument
12191 ret = dd->cntrnameslen; in hfi1_read_cntrs()
12192 *namep = dd->cntrnames; in hfi1_read_cntrs()
12197 ret = (dd->ndevcntrs) * sizeof(u64); in hfi1_read_cntrs()
12200 *cntrp = dd->cntrs; in hfi1_read_cntrs()
12216 dd, j, in hfi1_read_cntrs()
12223 dd->cntrs[entry->offset + j] = in hfi1_read_cntrs()
12229 for (j = 0; j < chip_sdma_engines(dd); in hfi1_read_cntrs()
12232 entry->rw_cntr(entry, dd, j, in hfi1_read_cntrs()
12237 dd->cntrs[entry->offset + j] = in hfi1_read_cntrs()
12241 val = entry->rw_cntr(entry, dd, in hfi1_read_cntrs()
12244 dd->cntrs[entry->offset] = val; in hfi1_read_cntrs()
12262 ret = ppd->dd->portcntrnameslen; in hfi1_read_portcntrs()
12263 *namep = ppd->dd->portcntrnames; in hfi1_read_portcntrs()
12268 ret = ppd->dd->nportcntrs * sizeof(u64); in hfi1_read_portcntrs()
12305 static void free_cntrs(struct hfi1_devdata *dd) in free_cntrs() argument
12310 if (dd->synth_stats_timer.function) in free_cntrs()
12311 del_timer_sync(&dd->synth_stats_timer); in free_cntrs()
12312 cancel_work_sync(&dd->update_cntr_work); in free_cntrs()
12313 ppd = (struct hfi1_pportdata *)(dd + 1); in free_cntrs()
12314 for (i = 0; i < dd->num_pports; i++, ppd++) { in free_cntrs()
12326 kfree(dd->portcntrnames); in free_cntrs()
12327 dd->portcntrnames = NULL; in free_cntrs()
12328 kfree(dd->cntrs); in free_cntrs()
12329 dd->cntrs = NULL; in free_cntrs()
12330 kfree(dd->scntrs); in free_cntrs()
12331 dd->scntrs = NULL; in free_cntrs()
12332 kfree(dd->cntrnames); in free_cntrs()
12333 dd->cntrnames = NULL; in free_cntrs()
12334 if (dd->update_cntr_wq) { in free_cntrs()
12335 destroy_workqueue(dd->update_cntr_wq); in free_cntrs()
12336 dd->update_cntr_wq = NULL; in free_cntrs()
12340 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry, in read_dev_port_cntr() argument
12347 dd_dev_err(dd, "Counter %s not enabled", entry->name); in read_dev_port_cntr()
12391 static u64 write_dev_port_cntr(struct hfi1_devdata *dd, in write_dev_port_cntr() argument
12398 dd_dev_err(dd, "Counter %s not enabled", entry->name); in write_dev_port_cntr()
12425 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl) in read_dev_cntr() argument
12431 sval = dd->scntrs + entry->offset; in read_dev_cntr()
12436 return read_dev_port_cntr(dd, entry, sval, dd, vl); in read_dev_cntr()
12439 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data) in write_dev_cntr() argument
12445 sval = dd->scntrs + entry->offset; in write_dev_cntr()
12450 return write_dev_port_cntr(dd, entry, sval, dd, vl, data); in write_dev_cntr()
12464 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && in read_port_cntr()
12470 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl); in read_port_cntr()
12484 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && in write_port_cntr()
12490 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data); in write_port_cntr()
12502 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata, in do_update_synth_timer() local
12512 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in do_update_synth_timer()
12515 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in do_update_synth_timer()
12520 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); in do_update_synth_timer()
12522 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { in do_update_synth_timer()
12529 dd->unit); in do_update_synth_timer()
12531 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); in do_update_synth_timer()
12533 "[%d] total flits 0x%llx limit 0x%llx", dd->unit, in do_update_synth_timer()
12537 dd->unit); in do_update_synth_timer()
12543 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit); in do_update_synth_timer()
12548 read_dev_cntr(dd, i, vl); in do_update_synth_timer()
12550 read_dev_cntr(dd, i, CNTR_INVALID_VL); in do_update_synth_timer()
12553 ppd = (struct hfi1_pportdata *)(dd + 1); in do_update_synth_timer()
12554 for (i = 0; i < dd->num_pports; i++, ppd++) { in do_update_synth_timer()
12573 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, in do_update_synth_timer()
12577 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, in do_update_synth_timer()
12581 dd->unit, dd->last_tx, dd->last_rx); in do_update_synth_timer()
12584 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); in do_update_synth_timer()
12590 struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer); in update_synth_timer() local
12592 queue_work(dd->update_cntr_wq, &dd->update_cntr_work); in update_synth_timer()
12593 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); in update_synth_timer()
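
update_synth_timer() shows the usual split between timer (atomic) context and process context: the timer callback only queues the work item and re-arms itself, while the slow counter reads run from the ordered workqueue set up in init_cntrs(). A condensed sketch of the same wiring, with the structure and field names abbreviated:

    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct dev {                         /* stand-in for hfi1_devdata */
        struct timer_list timer;
        struct work_struct work;
        struct workqueue_struct *wq;
    };

    static void do_update(struct work_struct *w)
    {
        struct dev *d = container_of(w, struct dev, work);
        /* slow CSR reads happen here, in process context */
        (void)d;
    }

    static void tick(struct timer_list *t)
    {
        struct dev *d = from_timer(d, t, timer);

        queue_work(d->wq, &d->work);         /* defer the heavy work */
        mod_timer(&d->timer, jiffies + HZ);  /* re-arm the period */
    }
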
12597 static int init_cntrs(struct hfi1_devdata *dd) in init_cntrs() argument
12606 u32 sdma_engines = chip_sdma_engines(dd); in init_cntrs()
12609 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0); in init_cntrs()
12616 dd->ndevcntrs = 0; in init_cntrs()
12626 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12635 dd->ndevcntrs++; in init_cntrs()
12638 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12647 dd->ndevcntrs++; in init_cntrs()
12655 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12656 dd->ndevcntrs++; in init_cntrs()
12661 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), in init_cntrs()
12663 if (!dd->cntrs) in init_cntrs()
12666 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12667 if (!dd->scntrs) in init_cntrs()
12671 dd->cntrnameslen = sz; in init_cntrs()
12672 dd->cntrnames = kmalloc(sz, GFP_KERNEL); in init_cntrs()
12673 if (!dd->cntrnames) in init_cntrs()
12677 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { in init_cntrs()
12734 rcv_ctxts = dd->num_rcv_contexts; in init_cntrs()
12742 dd->nportcntrs = 0; in init_cntrs()
12750 port_cntrs[i].offset = dd->nportcntrs; in init_cntrs()
12759 dd->nportcntrs++; in init_cntrs()
12767 port_cntrs[i].offset = dd->nportcntrs; in init_cntrs()
12768 dd->nportcntrs++; in init_cntrs()
12773 dd->portcntrnameslen = sz; in init_cntrs()
12774 dd->portcntrnames = kmalloc(sz, GFP_KERNEL); in init_cntrs()
12775 if (!dd->portcntrnames) in init_cntrs()
12779 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) { in init_cntrs()
12814 ppd = (struct hfi1_pportdata *)(dd + 1); in init_cntrs()
12815 for (i = 0; i < dd->num_pports; i++, ppd++) { in init_cntrs()
12816 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12820 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12826 if (init_cpu_counters(dd)) in init_cntrs()
12829 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d", in init_cntrs()
12830 WQ_MEM_RECLAIM, dd->unit); in init_cntrs()
12831 if (!dd->update_cntr_wq) in init_cntrs()
12834 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer); in init_cntrs()
12836 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); in init_cntrs()
12839 free_cntrs(dd); in init_cntrs()
12843 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate) in chip_to_opa_lstate() argument
12855 dd_dev_err(dd, in chip_to_opa_lstate()
12862 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate) in chip_to_opa_pstate() argument
12879 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n", in chip_to_opa_pstate()
12959 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n", in update_statusp()
12981 new_state = chip_to_opa_lstate(ppd->dd, in wait_logical_linkstate()
12982 read_logical_state(ppd->dd)); in wait_logical_linkstate()
12986 dd_dev_err(ppd->dd, in wait_logical_linkstate()
12999 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state); in log_state_transition()
13001 dd_dev_info(ppd->dd, in log_state_transition()
13012 u32 read_state = read_physical_state(ppd->dd); in log_physical_state()
13017 dd_dev_err(ppd->dd, in log_physical_state()
13040 read_state = read_physical_state(ppd->dd); in wait_physical_linkstate()
13044 dd_dev_err(ppd->dd, in wait_physical_linkstate()
13073 read_state = read_physical_state(ppd->dd); in wait_phys_link_offline_substates()
13077 dd_dev_err(ppd->dd, in wait_phys_link_offline_substates()
13106 read_state = read_physical_state(ppd->dd); in wait_phys_link_out_of_offline()
13110 dd_dev_err(ppd->dd, in wait_phys_link_out_of_offline()
13131 struct hfi1_devdata *dd = sc->dd; in hfi1_init_ctxt() local
13136 reg = read_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
13142 write_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
13147 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp) in hfi1_tempsense_rd() argument
13152 if (dd->icode != ICODE_RTL_SILICON) { in hfi1_tempsense_rd()
13154 dd_dev_info(dd, "%s: tempsense not supported by HW\n", in hfi1_tempsense_rd()
13158 reg = read_csr(dd, ASIC_STS_THERM); in hfi1_tempsense_rd()
13183 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits, in read_mod_write() argument
13190 spin_lock_irqsave(&dd->irq_src_lock, flags); in read_mod_write()
13191 reg = read_csr(dd, CCE_INT_MASK + (8 * idx)); in read_mod_write()
13196 write_csr(dd, CCE_INT_MASK + (8 * idx), reg); in read_mod_write()
13197 spin_unlock_irqrestore(&dd->irq_src_lock, flags); in read_mod_write()
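
read_mod_write() is a textbook locked read-modify-write: the spinlock keeps concurrent enable/disable calls from losing each other's bits in the interrupt mask CSR. A sketch over a plain in-memory word standing in for read_csr()/write_csr():

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static void rmw_mask(u64 *csr, spinlock_t *lock, u64 bits, bool set)
    {
        unsigned long flags;
        u64 reg;

        spin_lock_irqsave(lock, flags);
        reg = *csr;                  /* read_csr() in the driver */
        if (set)
            reg |= bits;
        else
            reg &= ~bits;
        *csr = reg;                  /* write_csr() in the driver */
        spin_unlock_irqrestore(lock, flags);
    }
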
13209 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set) in set_intr_bits() argument
13225 read_mod_write(dd, src - 1, bits, set); in set_intr_bits()
13230 read_mod_write(dd, last, bits, set); in set_intr_bits()
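
set_intr_bits() walks an interrupt-source range and batches the bits per 64-bit mask register, flushing each time the walk crosses a register boundary; the read_mod_write(dd, src - 1, ...) and trailing read_mod_write(dd, last, ...) calls above are those flushes. A sketch of the grouping over an in-memory register array:

    #include <stdint.h>

    static void set_bits_range(uint64_t *mask_regs, unsigned first,
                               unsigned last, int set)
    {
        uint64_t bits = 0;
        unsigned src;

        for (src = first; src <= last; src++) {
            unsigned bit = src % 64;
            if (bit == 0 && bits) {      /* crossed a register boundary */
                if (set)
                    mask_regs[(src - 1) / 64] |= bits;
                else
                    mask_regs[(src - 1) / 64] &= ~bits;
                bits = 0;
            }
            bits |= 1ull << bit;
        }
        if (set)                         /* flush the final partial group */
            mask_regs[last / 64] |= bits;
        else
            mask_regs[last / 64] &= ~bits;
    }
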
13238 void clear_all_interrupts(struct hfi1_devdata *dd) in clear_all_interrupts() argument
13243 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0); in clear_all_interrupts()
13245 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13246 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13247 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13248 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13249 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13250 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13251 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13252 for (i = 0; i < chip_send_contexts(dd); i++) in clear_all_interrupts()
13253 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13254 for (i = 0; i < chip_sdma_engines(dd); i++) in clear_all_interrupts()
13255 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13257 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0); in clear_all_interrupts()
13258 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0); in clear_all_interrupts()
13259 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0); in clear_all_interrupts()
13266 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) in remap_intr() argument
13275 dd->gi_mask[m] &= ~((u64)1 << n); in remap_intr()
13277 dd_dev_err(dd, "remap interrupt err\n"); in remap_intr()
13284 reg = read_csr(dd, CCE_INT_MAP + (8 * m)); in remap_intr()
13287 write_csr(dd, CCE_INT_MAP + (8 * m), reg); in remap_intr()
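
remap_intr() updates one byte lane inside a CCE_INT_MAP CSR: with m = isrc / 8 selecting the register and n = isrc % 8 the lane, the old vector byte is cleared and the new MSI-X vector merged in. A sketch of the byte-lane merge, assuming 8-bit map entries:

    #include <stdint.h>

    static uint64_t map_source(uint64_t reg, unsigned isrc, uint8_t vector)
    {
        unsigned n = isrc % 8;           /* byte lane inside the CSR */

        reg &= ~(0xffull << (8 * n));    /* clear the old vector */
        reg |= (uint64_t)vector << (8 * n);
        return reg;
    }
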
13290 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr) in remap_sdma_interrupts() argument
13299 remap_intr(dd, IS_SDMA_START + engine, msix_intr); in remap_sdma_interrupts()
13300 remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr); in remap_sdma_interrupts()
13301 remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr); in remap_sdma_interrupts()
13308 void reset_interrupts(struct hfi1_devdata *dd) in reset_interrupts() argument
13314 dd->gi_mask[i] = ~(u64)0; in reset_interrupts()
13318 write_csr(dd, CCE_INT_MAP + (8 * i), 0); in reset_interrupts()
13326 static int set_up_interrupts(struct hfi1_devdata *dd) in set_up_interrupts() argument
13331 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); in set_up_interrupts()
13334 clear_all_interrupts(dd); in set_up_interrupts()
13337 reset_interrupts(dd); in set_up_interrupts()
13340 ret = msix_initialize(dd); in set_up_interrupts()
13344 ret = msix_request_irqs(dd); in set_up_interrupts()
13346 msix_clean_up_interrupts(dd); in set_up_interrupts()
13362 static int set_up_context_variables(struct hfi1_devdata *dd) in set_up_context_variables() argument
13370 u32 send_contexts = chip_send_contexts(dd); in set_up_context_variables()
13371 u32 rcv_contexts = chip_rcv_contexts(dd); in set_up_context_variables()
13394 dd_dev_err(dd, in set_up_context_variables()
13414 dd_dev_err(dd, in set_up_context_variables()
13423 hfi1_num_netdev_contexts(dd, rcv_contexts - in set_up_context_variables()
13449 dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n"); in set_up_context_variables()
13452 dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n", in set_up_context_variables()
13458 dd->num_rcv_contexts = in set_up_context_variables()
13460 dd->n_krcv_queues = num_kernel_contexts; in set_up_context_variables()
13461 dd->first_dyn_alloc_ctxt = num_kernel_contexts; in set_up_context_variables()
13462 dd->num_netdev_contexts = num_netdev_contexts; in set_up_context_variables()
13463 dd->num_user_contexts = n_usr_ctxts; in set_up_context_variables()
13464 dd->freectxts = n_usr_ctxts; in set_up_context_variables()
13465 dd_dev_info(dd, in set_up_context_variables()
13468 (int)dd->num_rcv_contexts, in set_up_context_variables()
13469 (int)dd->n_krcv_queues, in set_up_context_variables()
13470 dd->num_netdev_contexts, in set_up_context_variables()
13471 dd->num_user_contexts); in set_up_context_variables()
13484 dd->rcv_entries.group_size = RCV_INCREMENT; in set_up_context_variables()
13485 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size; in set_up_context_variables()
13486 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts; in set_up_context_variables()
13487 dd->rcv_entries.nctxt_extra = ngroups - in set_up_context_variables()
13488 (dd->num_rcv_contexts * dd->rcv_entries.ngroups); in set_up_context_variables()
13489 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n", in set_up_context_variables()
13490 dd->rcv_entries.ngroups, in set_up_context_variables()
13491 dd->rcv_entries.nctxt_extra); in set_up_context_variables()
13492 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size > in set_up_context_variables()
13494 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) / in set_up_context_variables()
13495 dd->rcv_entries.group_size; in set_up_context_variables()
13496 dd_dev_info(dd, in set_up_context_variables()
13498 dd->rcv_entries.ngroups); in set_up_context_variables()
13499 dd->rcv_entries.nctxt_extra = 0; in set_up_context_variables()
13504 ret = init_sc_pools_and_sizes(dd); in set_up_context_variables()
13506 dd->num_send_contexts = ret; in set_up_context_variables()
13508 dd, in set_up_context_variables()
13511 dd->num_send_contexts, in set_up_context_variables()
13512 dd->sc_sizes[SC_KERNEL].count, in set_up_context_variables()
13513 dd->sc_sizes[SC_ACK].count, in set_up_context_variables()
13514 dd->sc_sizes[SC_USER].count, in set_up_context_variables()
13515 dd->sc_sizes[SC_VL15].count); in set_up_context_variables()
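
The rcv_entries math above divides the RcvArray into fixed-size groups, gives every receive context the same whole-group share, and tracks the remainder so the first few contexts can absorb one extra group each. The same arithmetic as a standalone sketch:

    #include <stdint.h>

    struct rcv_split {
        uint32_t per_ctxt;   /* whole groups per context */
        uint32_t extra;      /* leftover groups, one per early context */
    };

    static struct rcv_split split_rcv_array(uint32_t array_count,
                                            uint32_t group_size,
                                            uint32_t num_ctxts)
    {
        struct rcv_split s;
        uint32_t ngroups = array_count / group_size;

        s.per_ctxt = ngroups / num_ctxts;
        s.extra = ngroups - s.per_ctxt * num_ctxts;
        return s;
    }
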
13529 struct hfi1_devdata *dd = ppd->dd; in set_partition_keys() local
13533 dd_dev_info(dd, "Setting partition keys\n"); in set_partition_keys()
13534 for (i = 0; i < hfi1_get_npkeys(dd); i++) { in set_partition_keys()
13541 write_csr(dd, RCV_PARTITION_KEY + in set_partition_keys()
13548 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK); in set_partition_keys()
13559 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd) in write_uninitialized_csrs_and_memories() argument
13565 write_csr(dd, CCE_INT_MAP + (8 * i), 0); in write_uninitialized_csrs_and_memories()
13568 for (i = 0; i < chip_send_contexts(dd); i++) in write_uninitialized_csrs_and_memories()
13569 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); in write_uninitialized_csrs_and_memories()
13581 for (i = 0; i < chip_rcv_contexts(dd); i++) { in write_uninitialized_csrs_and_memories()
13582 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); in write_uninitialized_csrs_and_memories()
13583 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); in write_uninitialized_csrs_and_memories()
13585 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0); in write_uninitialized_csrs_and_memories()
13589 for (i = 0; i < chip_rcv_array_count(dd); i++) in write_uninitialized_csrs_and_memories()
13590 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0); in write_uninitialized_csrs_and_memories()
13594 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0); in write_uninitialized_csrs_and_memories()
13600 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits, in clear_cce_status() argument
13607 reg = read_csr(dd, CCE_STATUS); in clear_cce_status()
13612 write_csr(dd, CCE_CTRL, ctrl_bits); in clear_cce_status()
13617 reg = read_csr(dd, CCE_STATUS); in clear_cce_status()
13621 dd_dev_err(dd, in clear_cce_status()
13631 static void reset_cce_csrs(struct hfi1_devdata *dd) in reset_cce_csrs() argument
13639 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK); in reset_cce_csrs()
13640 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK); in reset_cce_csrs()
13641 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK); in reset_cce_csrs()
13643 write_csr(dd, CCE_SCRATCH + (8 * i), 0); in reset_cce_csrs()
13645 write_csr(dd, CCE_ERR_MASK, 0); in reset_cce_csrs()
13646 write_csr(dd, CCE_ERR_CLEAR, ~0ull); in reset_cce_csrs()
13649 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0); in reset_cce_csrs()
13650 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR); in reset_cce_csrs()
13653 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0); in reset_cce_csrs()
13654 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i), in reset_cce_csrs()
13659 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull); in reset_cce_csrs()
13660 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull); in reset_cce_csrs()
13663 write_csr(dd, CCE_INT_MAP, 0); in reset_cce_csrs()
13666 write_csr(dd, CCE_INT_MASK + (8 * i), 0); in reset_cce_csrs()
13667 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull); in reset_cce_csrs()
13672 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0); in reset_cce_csrs()
13676 static void reset_misc_csrs(struct hfi1_devdata *dd) in reset_misc_csrs() argument
13681 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0); in reset_misc_csrs()
13682 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0); in reset_misc_csrs()
13683 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0); in reset_misc_csrs()
13690 write_csr(dd, MISC_CFG_RSA_CMD, 1); in reset_misc_csrs()
13691 write_csr(dd, MISC_CFG_RSA_MU, 0); in reset_misc_csrs()
13692 write_csr(dd, MISC_CFG_FW_CTRL, 0); in reset_misc_csrs()
13698 write_csr(dd, MISC_ERR_MASK, 0); in reset_misc_csrs()
13699 write_csr(dd, MISC_ERR_CLEAR, ~0ull); in reset_misc_csrs()
13704 static void reset_txe_csrs(struct hfi1_devdata *dd) in reset_txe_csrs() argument
13711 write_csr(dd, SEND_CTRL, 0); in reset_txe_csrs()
13712 __cm_reset(dd, 0); /* reset CM internal state */ in reset_txe_csrs()
13717 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0); in reset_txe_csrs()
13718 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */ in reset_txe_csrs()
13720 write_csr(dd, SEND_PIO_ERR_MASK, 0); in reset_txe_csrs()
13721 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13724 write_csr(dd, SEND_DMA_ERR_MASK, 0); in reset_txe_csrs()
13725 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13728 write_csr(dd, SEND_EGRESS_ERR_MASK, 0); in reset_txe_csrs()
13729 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13731 write_csr(dd, SEND_BTH_QP, 0); in reset_txe_csrs()
13732 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0); in reset_txe_csrs()
13733 write_csr(dd, SEND_SC2VLT0, 0); in reset_txe_csrs()
13734 write_csr(dd, SEND_SC2VLT1, 0); in reset_txe_csrs()
13735 write_csr(dd, SEND_SC2VLT2, 0); in reset_txe_csrs()
13736 write_csr(dd, SEND_SC2VLT3, 0); in reset_txe_csrs()
13737 write_csr(dd, SEND_LEN_CHECK0, 0); in reset_txe_csrs()
13738 write_csr(dd, SEND_LEN_CHECK1, 0); in reset_txe_csrs()
13740 write_csr(dd, SEND_ERR_MASK, 0); in reset_txe_csrs()
13741 write_csr(dd, SEND_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13744 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0); in reset_txe_csrs()
13746 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0); in reset_txe_csrs()
13747 for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++) in reset_txe_csrs()
13748 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0); in reset_txe_csrs()
13750 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0); in reset_txe_csrs()
13752 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0); in reset_txe_csrs()
13753 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR); in reset_txe_csrs()
13754 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR); in reset_txe_csrs()
13756 write_csr(dd, SEND_CM_TIMER_CTRL, 0); in reset_txe_csrs()
13757 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0); in reset_txe_csrs()
13758 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0); in reset_txe_csrs()
13759 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0); in reset_txe_csrs()
13760 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0); in reset_txe_csrs()
13762 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); in reset_txe_csrs()
13763 write_csr(dd, SEND_CM_CREDIT_VL15, 0); in reset_txe_csrs()
13768 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull); in reset_txe_csrs()
13775 for (i = 0; i < chip_send_contexts(dd); i++) { in reset_txe_csrs()
13776 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); in reset_txe_csrs()
13777 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0); in reset_txe_csrs()
13778 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); in reset_txe_csrs()
13779 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0); in reset_txe_csrs()
13780 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0); in reset_txe_csrs()
13781 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13782 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0); in reset_txe_csrs()
13783 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0); in reset_txe_csrs()
13784 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0); in reset_txe_csrs()
13785 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0); in reset_txe_csrs()
13786 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0); in reset_txe_csrs()
13787 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0); in reset_txe_csrs()
13793 for (i = 0; i < chip_sdma_engines(dd); i++) { in reset_txe_csrs()
13794 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); in reset_txe_csrs()
13796 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0); in reset_txe_csrs()
13797 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0); in reset_txe_csrs()
13798 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0); in reset_txe_csrs()
13800 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0); in reset_txe_csrs()
13801 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0); in reset_txe_csrs()
13803 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0); in reset_txe_csrs()
13804 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0); in reset_txe_csrs()
13807 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0); in reset_txe_csrs()
13808 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13810 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0); in reset_txe_csrs()
13811 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0); in reset_txe_csrs()
13812 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0); in reset_txe_csrs()
13813 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0); in reset_txe_csrs()
13814 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0); in reset_txe_csrs()
13815 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0); in reset_txe_csrs()
13816 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0); in reset_txe_csrs()
13824 static void init_rbufs(struct hfi1_devdata *dd) in init_rbufs() argument
13835 reg = read_csr(dd, RCV_STATUS); in init_rbufs()
13847 dd_dev_err(dd, in init_rbufs()
13856 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK); in init_rbufs()
13864 read_csr(dd, RCV_CTRL); in init_rbufs()
13871 reg = read_csr(dd, RCV_STATUS); in init_rbufs()
13877 dd_dev_err(dd, in init_rbufs()
13886 static void reset_rxe_csrs(struct hfi1_devdata *dd) in reset_rxe_csrs() argument
13893 write_csr(dd, RCV_CTRL, 0); in reset_rxe_csrs()
13894 init_rbufs(dd); in reset_rxe_csrs()
13899 write_csr(dd, RCV_BTH_QP, 0); in reset_rxe_csrs()
13900 write_csr(dd, RCV_MULTICAST, 0); in reset_rxe_csrs()
13901 write_csr(dd, RCV_BYPASS, 0); in reset_rxe_csrs()
13902 write_csr(dd, RCV_VL15, 0); in reset_rxe_csrs()
13904 write_csr(dd, RCV_ERR_INFO, in reset_rxe_csrs()
13907 write_csr(dd, RCV_ERR_MASK, 0); in reset_rxe_csrs()
13908 write_csr(dd, RCV_ERR_CLEAR, ~0ull); in reset_rxe_csrs()
13911 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0); in reset_rxe_csrs()
13913 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0); in reset_rxe_csrs()
13915 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0); in reset_rxe_csrs()
13917 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0); in reset_rxe_csrs()
13919 clear_rsm_rule(dd, i); in reset_rxe_csrs()
13921 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0); in reset_rxe_csrs()
13926 for (i = 0; i < chip_rcv_contexts(dd); i++) { in reset_rxe_csrs()
13928 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0); in reset_rxe_csrs()
13930 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0); in reset_rxe_csrs()
13931 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0); in reset_rxe_csrs()
13932 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0); in reset_rxe_csrs()
13933 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); in reset_rxe_csrs()
13934 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0); in reset_rxe_csrs()
13935 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0); in reset_rxe_csrs()
13936 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0); in reset_rxe_csrs()
13937 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); in reset_rxe_csrs()
13938 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0); in reset_rxe_csrs()
13939 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0); in reset_rxe_csrs()
13943 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0); in reset_rxe_csrs()
13945 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0); in reset_rxe_csrs()
13948 write_uctxt_csr(dd, i, in reset_rxe_csrs()
13965 static void init_sc2vl_tables(struct hfi1_devdata *dd) in init_sc2vl_tables() argument
13971 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL( in init_sc2vl_tables()
13977 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL( in init_sc2vl_tables()
13983 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL( in init_sc2vl_tables()
13989 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL( in init_sc2vl_tables()
13997 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL( in init_sc2vl_tables()
14001 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL( in init_sc2vl_tables()
14009 *((u8 *)(dd->sc2vl) + i) = (u8)i; in init_sc2vl_tables()
14011 *((u8 *)(dd->sc2vl) + i) = 0; in init_sc2vl_tables()
14024 static int init_chip(struct hfi1_devdata *dd) in init_chip() argument
14041 write_csr(dd, SEND_CTRL, 0); in init_chip()
14042 for (i = 0; i < chip_send_contexts(dd); i++) in init_chip()
14043 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); in init_chip()
14044 for (i = 0; i < chip_sdma_engines(dd); i++) in init_chip()
14045 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); in init_chip()
14047 write_csr(dd, RCV_CTRL, 0); in init_chip()
14048 for (i = 0; i < chip_rcv_contexts(dd); i++) in init_chip()
14049 write_csr(dd, RCV_CTXT_CTRL, 0); in init_chip()
14052 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull); in init_chip()
14060 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); in init_chip()
14061 (void)read_csr(dd, CCE_DC_CTRL); in init_chip()
14069 dd_dev_info(dd, "Resetting CSRs with FLR\n"); in init_chip()
14072 pcie_flr(dd->pcidev); in init_chip()
14075 ret = restore_pci_variables(dd); in init_chip()
14077 dd_dev_err(dd, "%s: Could not restore PCI variables\n", in init_chip()
14082 if (is_ax(dd)) { in init_chip()
14083 dd_dev_info(dd, "Resetting CSRs with FLR\n"); in init_chip()
14084 pcie_flr(dd->pcidev); in init_chip()
14085 ret = restore_pci_variables(dd); in init_chip()
14087 dd_dev_err(dd, "%s: Could not restore PCI variables\n", in init_chip()
14093 dd_dev_info(dd, "Resetting CSRs with writes\n"); in init_chip()
14094 reset_cce_csrs(dd); in init_chip()
14095 reset_txe_csrs(dd); in init_chip()
14096 reset_rxe_csrs(dd); in init_chip()
14097 reset_misc_csrs(dd); in init_chip()
14100 write_csr(dd, CCE_DC_CTRL, 0); in init_chip()
14103 setextled(dd, 0); in init_chip()
14115 write_csr(dd, ASIC_QSFP1_OUT, 0x1f); in init_chip()
14116 write_csr(dd, ASIC_QSFP2_OUT, 0x1f); in init_chip()
14117 init_chip_resources(dd); in init_chip()
14121 static void init_early_variables(struct hfi1_devdata *dd) in init_early_variables() argument
14126 dd->vau = CM_VAU; in init_early_variables()
14127 dd->link_credits = CM_GLOBAL_CREDITS; in init_early_variables()
14128 if (is_ax(dd)) in init_early_variables()
14129 dd->link_credits--; in init_early_variables()
14130 dd->vcu = cu_to_vcu(hfi1_cu); in init_early_variables()
14132 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau); in init_early_variables()
14133 if (dd->vl15_init > dd->link_credits) in init_early_variables()
14134 dd->vl15_init = dd->link_credits; in init_early_variables()
14136 write_uninitialized_csrs_and_memories(dd); in init_early_variables()
14139 for (i = 0; i < dd->num_pports; i++) { in init_early_variables()
14140 struct hfi1_pportdata *ppd = &dd->pport[i]; in init_early_variables()
14144 init_sc2vl_tables(dd); in init_early_variables()
14147 static void init_kdeth_qp(struct hfi1_devdata *dd) in init_kdeth_qp() argument
14149 write_csr(dd, SEND_BTH_QP, in init_kdeth_qp()
14153 write_csr(dd, RCV_BTH_QP, in init_kdeth_qp()
14163 u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx) in hfi1_get_qp_map() argument
14165 u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8); in hfi1_get_qp_map()
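
hfi1_get_qp_map() reads one byte-wide entry out of a table packed eight entries per 64-bit CSR: register idx / 8, byte lane idx % 8, as the read above shows. The extraction over an in-memory copy of the table:

    #include <stdint.h>

    static uint8_t qp_map_entry(const uint64_t *table, unsigned idx)
    {
        uint64_t reg = table[idx / 8];       /* eight entries per word */
        return (uint8_t)(reg >> ((idx % 8) * 8));
    }
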
14188 static void init_qpmap_table(struct hfi1_devdata *dd, in init_qpmap_table() argument
14203 write_csr(dd, regno, reg); in init_qpmap_table()
14209 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK in init_qpmap_table()
14237 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd) in alloc_rsm_map_table() argument
14240 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */ in alloc_rsm_map_table()
14255 static void complete_rsm_map_table(struct hfi1_devdata *dd, in complete_rsm_map_table() argument
14263 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]); in complete_rsm_map_table()
14266 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); in complete_rsm_map_table()
14271 static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) in has_rsm_rule() argument
14273 return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0; in has_rsm_rule()
14279 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index, in add_rsm_rule() argument
14282 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), in add_rsm_rule()
14286 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), in add_rsm_rule()
14293 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), in add_rsm_rule()
14303 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) in clear_rsm_rule() argument
14305 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0); in clear_rsm_rule()
14306 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0); in clear_rsm_rule()
14307 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0); in clear_rsm_rule()
14368 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt) in init_qos() argument
14377 rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n); in init_qos()
14425 add_rsm_rule(dd, RSM_INS_VERBS, &rrd); in init_qos()
14430 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT); in init_qos()
14431 dd->qos_shift = n + 1; in init_qos()
14434 dd->qos_shift = 1; in init_qos()
14435 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1); in init_qos()
14438 static void init_fecn_handling(struct hfi1_devdata *dd, in init_fecn_handling() argument
14451 start = dd->first_dyn_alloc_ctxt; in init_fecn_handling()
14453 total_cnt = dd->num_rcv_contexts - start; in init_fecn_handling()
14457 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n"); in init_fecn_handling()
14473 for (i = start, idx = rmt->used; i < dd->num_rcv_contexts; in init_fecn_handling()
14507 add_rsm_rule(dd, RSM_INS_FECN, &rrd); in init_fecn_handling()
14517 static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd) in hfi1_netdev_update_rmt() argument
14523 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); in hfi1_netdev_update_rmt()
14524 int ctxt_count = hfi1_netdev_ctxt_count(dd); in hfi1_netdev_update_rmt()
14527 if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) { in hfi1_netdev_update_rmt()
14528 dd_dev_info(dd, "Contexts are already mapped in RMT\n"); in hfi1_netdev_update_rmt()
14533 dd_dev_err(dd, "Not enough RMT entries used = %d\n", in hfi1_netdev_update_rmt()
14538 dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n", in hfi1_netdev_update_rmt()
14544 reg = read_csr(dd, regoff); in hfi1_netdev_update_rmt()
14549 reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8); in hfi1_netdev_update_rmt()
14554 dev_dbg(&(dd)->pcidev->dev, in hfi1_netdev_update_rmt()
14558 write_csr(dd, regoff, reg); in hfi1_netdev_update_rmt()
14561 reg = read_csr(dd, regoff); in hfi1_netdev_update_rmt()
14568 static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd, in hfi1_enable_rsm_rule() argument
14571 if (!hfi1_netdev_update_rmt(dd)) { in hfi1_enable_rsm_rule()
14572 dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule); in hfi1_enable_rsm_rule()
14576 add_rsm_rule(dd, rule, rrd); in hfi1_enable_rsm_rule()
14577 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); in hfi1_enable_rsm_rule()
14580 void hfi1_init_aip_rsm(struct hfi1_devdata *dd) in hfi1_init_aip_rsm() argument
14586 if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) { in hfi1_init_aip_rsm()
14587 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); in hfi1_init_aip_rsm()
14604 hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd); in hfi1_init_aip_rsm()
14609 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd) in hfi1_init_vnic_rsm() argument
14611 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); in hfi1_init_vnic_rsm()
14631 hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd); in hfi1_init_vnic_rsm()
14634 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd) in hfi1_deinit_vnic_rsm() argument
14636 clear_rsm_rule(dd, RSM_INS_VNIC); in hfi1_deinit_vnic_rsm()
14639 void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd) in hfi1_deinit_aip_rsm() argument
14642 if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1) in hfi1_deinit_aip_rsm()
14643 clear_rsm_rule(dd, RSM_INS_AIP); in hfi1_deinit_aip_rsm()
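
The AIP RSM rule is refcounted: atomic_fetch_inc() returning 0 marks the first user, who installs the rule, and atomic_fetch_add_unless(..., -1, 0) returning 1 marks the last user, who clears it (an unbalanced put stays a no-op). The same first-get/last-put discipline sketched in C11 atomics:

    #include <stdatomic.h>

    static atomic_int users;

    static void rule_get(void (*install)(void))
    {
        if (atomic_fetch_add(&users, 1) == 0)
            install();               /* first user installs the rule */
    }

    static void rule_put(void (*remove_rule)(void))
    {
        int old = atomic_load(&users);

        /* decrement unless already zero, like atomic_fetch_add_unless() */
        while (old != 0 &&
               !atomic_compare_exchange_weak(&users, &old, old - 1))
            ;
        if (old == 1)
            remove_rule();           /* last user clears the rule */
    }
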
14646 static int init_rxe(struct hfi1_devdata *dd) in init_rxe() argument
14652 write_csr(dd, RCV_ERR_MASK, ~0ull); in init_rxe()
14654 rmt = alloc_rsm_map_table(dd); in init_rxe()
14659 init_qos(dd, rmt); in init_rxe()
14660 init_fecn_handling(dd, rmt); in init_rxe()
14661 complete_rsm_map_table(dd, rmt); in init_rxe()
14663 hfi1_netdev_set_free_rmt_idx(dd, rmt->used); in init_rxe()
14679 val = read_csr(dd, RCV_BYPASS); in init_rxe()
14683 write_csr(dd, RCV_BYPASS, val); in init_rxe()
14687 static void init_other(struct hfi1_devdata *dd) in init_other() argument
14690 write_csr(dd, CCE_ERR_MASK, ~0ull); in init_other()
14692 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK); in init_other()
14694 write_csr(dd, DCC_ERR_FLG_EN, ~0ull); in init_other()
14695 write_csr(dd, DC_DC8051_ERR_EN, ~0ull); in init_other()
14706 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu, in assign_cm_au_table() argument
14709 write_csr(dd, csr0to3, in assign_cm_au_table()
14716 write_csr(dd, csr4to7, in assign_cm_au_table()
14727 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu) in assign_local_cm_au_table() argument
14729 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3, in assign_local_cm_au_table()
14733 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu) in assign_remote_cm_au_table() argument
14735 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3, in assign_remote_cm_au_table()
14739 static void init_txe(struct hfi1_devdata *dd) in init_txe() argument
14744 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull); in init_txe()
14745 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull); in init_txe()
14746 write_csr(dd, SEND_ERR_MASK, ~0ull); in init_txe()
14747 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull); in init_txe()
14750 for (i = 0; i < chip_send_contexts(dd); i++) in init_txe()
14751 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull); in init_txe()
14752 for (i = 0; i < chip_sdma_engines(dd); i++) in init_txe()
14753 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull); in init_txe()
14756 assign_local_cm_au_table(dd, dd->vcu); in init_txe()
14762 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR) in init_txe()
14763 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE); in init_txe()
14766 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, in hfi1_set_ctxt_jkey() argument
14782 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg); in hfi1_set_ctxt_jkey()
14786 if (!is_ax(dd)) { in hfi1_set_ctxt_jkey()
14787 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); in hfi1_set_ctxt_jkey()
14789 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); in hfi1_set_ctxt_jkey()
14796 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg); in hfi1_set_ctxt_jkey()
14801 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) in hfi1_clear_ctxt_jkey() argument
14810 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0); in hfi1_clear_ctxt_jkey()
14816 if (!is_ax(dd)) { in hfi1_clear_ctxt_jkey()
14817 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); in hfi1_clear_ctxt_jkey()
14819 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); in hfi1_clear_ctxt_jkey()
14822 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0); in hfi1_clear_ctxt_jkey()
14827 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, in hfi1_set_ctxt_pkey() argument
14839 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg); in hfi1_set_ctxt_pkey()
14840 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); in hfi1_set_ctxt_pkey()
14843 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); in hfi1_set_ctxt_pkey()
14848 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt) in hfi1_clear_ctxt_pkey() argument
14857 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); in hfi1_clear_ctxt_pkey()
14859 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); in hfi1_clear_ctxt_pkey()
14860 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0); in hfi1_clear_ctxt_pkey()
14869 void hfi1_start_cleanup(struct hfi1_devdata *dd) in hfi1_start_cleanup() argument
14871 aspm_exit(dd); in hfi1_start_cleanup()
14872 free_cntrs(dd); in hfi1_start_cleanup()
14873 free_rcverr(dd); in hfi1_start_cleanup()
14874 finish_chip_resources(dd); in hfi1_start_cleanup()
14885 static int init_asic_data(struct hfi1_devdata *dd) in init_asic_data() argument
14893 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL); in init_asic_data()
14900 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) && in init_asic_data()
14901 dd->unit != peer->unit) in init_asic_data()
14907 dd->asic_data = peer->asic_data; in init_asic_data()
14910 dd->asic_data = asic_data; in init_asic_data()
14911 mutex_init(&dd->asic_data->asic_resource_mutex); in init_asic_data()
14913 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ in init_asic_data()
14918 ret = set_up_i2c(dd, dd->asic_data); in init_asic_data()
14929 static int obtain_boardname(struct hfi1_devdata *dd) in obtain_boardname() argument
14937 ret = read_hfi1_efi_var(dd, "description", &size, in obtain_boardname()
14938 (void **)&dd->boardname); in obtain_boardname()
14940 dd_dev_info(dd, "Board description not found\n"); in obtain_boardname()
14942 dd->boardname = kstrdup(generic, GFP_KERNEL); in obtain_boardname()
14943 if (!dd->boardname) in obtain_boardname()
14957 static int check_int_registers(struct hfi1_devdata *dd) in check_int_registers() argument
14964 mask = read_csr(dd, CCE_INT_MASK); in check_int_registers()
14965 write_csr(dd, CCE_INT_MASK, 0ull); in check_int_registers()
14966 reg = read_csr(dd, CCE_INT_MASK); in check_int_registers()
14971 write_csr(dd, CCE_INT_CLEAR, all_bits); in check_int_registers()
14972 reg = read_csr(dd, CCE_INT_STATUS); in check_int_registers()
14977 write_csr(dd, CCE_INT_FORCE, all_bits); in check_int_registers()
14978 reg = read_csr(dd, CCE_INT_STATUS); in check_int_registers()
14983 write_csr(dd, CCE_INT_CLEAR, all_bits); in check_int_registers()
14984 write_csr(dd, CCE_INT_MASK, mask); in check_int_registers()
14988 write_csr(dd, CCE_INT_MASK, mask); in check_int_registers()
14989 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n"); in check_int_registers()
15000 int hfi1_init_dd(struct hfi1_devdata *dd) in hfi1_init_dd() argument
15002 struct pci_dev *pdev = dd->pcidev; in hfi1_init_dd()
15013 u32 sdma_engines = chip_sdma_engines(dd); in hfi1_init_dd()
15015 ppd = dd->pport; in hfi1_init_dd()
15016 for (i = 0; i < dd->num_pports; i++, ppd++) { in hfi1_init_dd()
15019 hfi1_init_pportdata(pdev, ppd, dd, 0, 1); in hfi1_init_dd()
15035 dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n", in hfi1_init_dd()
15043 dd->vld[vl].mtu = hfi1_max_mtu; in hfi1_init_dd()
15044 dd->vld[15].mtu = MAX_MAD_PACKET; in hfi1_init_dd()
15066 ret = hfi1_pcie_ddinit(dd, pdev); in hfi1_init_dd()
15071 ret = save_pci_variables(dd); in hfi1_init_dd()
15075 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT) in hfi1_init_dd()
15077 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT) in hfi1_init_dd()
15086 ret = check_int_registers(dd); in hfi1_init_dd()
15095 reg = read_csr(dd, CCE_REVISION2); in hfi1_init_dd()
15096 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT) in hfi1_init_dd()
15099 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT; in hfi1_init_dd()
15100 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT; in hfi1_init_dd()
15101 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n", in hfi1_init_dd()
15102 dd->icode < ARRAY_SIZE(inames) ? in hfi1_init_dd()
15103 inames[dd->icode] : "unknown", (int)dd->irev); in hfi1_init_dd()
15106 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G; in hfi1_init_dd()
15108 dd->pport->link_speed_enabled = dd->pport->link_speed_supported; in hfi1_init_dd()
15110 dd->pport->link_speed_active = OPA_LINK_SPEED_25G; in hfi1_init_dd()
15113 ppd = dd->pport; in hfi1_init_dd()
15114 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) { in hfi1_init_dd()
15123 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n", in hfi1_init_dd()
15138 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64; in hfi1_init_dd()
15139 if (dd->rcv_intr_timeout_csr > in hfi1_init_dd()
15141 dd->rcv_intr_timeout_csr = in hfi1_init_dd()
15143 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout) in hfi1_init_dd()
15144 dd->rcv_intr_timeout_csr = 1; in hfi1_init_dd()
15147 read_guid(dd); in hfi1_init_dd()
15150 ret = init_asic_data(dd); in hfi1_init_dd()
15155 ret = init_chip(dd); in hfi1_init_dd()
15160 ret = pcie_speeds(dd); in hfi1_init_dd()
15165 ret = eprom_init(dd); in hfi1_init_dd()
15170 get_platform_config(dd); in hfi1_init_dd()
15173 ret = hfi1_firmware_init(dd); in hfi1_init_dd()
15189 ret = do_pcie_gen3_transition(dd); in hfi1_init_dd()
15197 tune_pcie_caps(dd); in hfi1_init_dd()
15200 init_early_variables(dd); in hfi1_init_dd()
15202 parse_platform_config(dd); in hfi1_init_dd()
15204 ret = obtain_boardname(dd); in hfi1_init_dd()
15208 snprintf(dd->boardversion, BOARD_VERS_MAX, in hfi1_init_dd()
15211 (u32)dd->majrev, in hfi1_init_dd()
15212 (u32)dd->minrev, in hfi1_init_dd()
15213 (dd->revision >> CCE_REVISION_SW_SHIFT) in hfi1_init_dd()
15217 ret = hfi1_alloc_rx(dd); in hfi1_init_dd()
15221 ret = set_up_context_variables(dd); in hfi1_init_dd()
15226 ret = init_rxe(dd); in hfi1_init_dd()
15231 init_txe(dd); in hfi1_init_dd()
15233 init_other(dd); in hfi1_init_dd()
15235 init_kdeth_qp(dd); in hfi1_init_dd()
15237 ret = hfi1_dev_affinity_init(dd); in hfi1_init_dd()
15242 ret = init_send_contexts(dd); in hfi1_init_dd()
15246 ret = hfi1_create_kctxts(dd); in hfi1_init_dd()
15254 aspm_init(dd); in hfi1_init_dd()
15256 ret = init_pervl_scs(dd); in hfi1_init_dd()
15261 for (i = 0; i < dd->num_pports; ++i) { in hfi1_init_dd()
15262 ret = sdma_init(dd, i); in hfi1_init_dd()
15268 ret = set_up_interrupts(dd); in hfi1_init_dd()
15272 ret = hfi1_comp_vectors_set_up(dd); in hfi1_init_dd()
15277 init_lcb_access(dd); in hfi1_init_dd()
15284 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n", in hfi1_init_dd()
15285 (dd->base_guid & 0xFFFFFF) | in hfi1_init_dd()
15286 ((dd->base_guid >> 11) & 0xF000000)); in hfi1_init_dd()
15288 dd->oui1 = dd->base_guid >> 56 & 0xFF; in hfi1_init_dd()
15289 dd->oui2 = dd->base_guid >> 48 & 0xFF; in hfi1_init_dd()
15290 dd->oui3 = dd->base_guid >> 40 & 0xFF; in hfi1_init_dd()
15292 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */ in hfi1_init_dd()
15296 thermal_init(dd); in hfi1_init_dd()
15298 ret = init_cntrs(dd); in hfi1_init_dd()
15302 ret = init_rcverr(dd); in hfi1_init_dd()
15306 init_completion(&dd->user_comp); in hfi1_init_dd()
15309 refcount_set(&dd->user_refcount, 1); in hfi1_init_dd()
15314 free_rcverr(dd); in hfi1_init_dd()
15316 free_cntrs(dd); in hfi1_init_dd()
15318 hfi1_comp_vectors_clean_up(dd); in hfi1_init_dd()
15319 msix_clean_up_interrupts(dd); in hfi1_init_dd()
15321 hfi1_free_rx(dd); in hfi1_init_dd()
15322 hfi1_pcie_ddcleanup(dd); in hfi1_init_dd()
15324 hfi1_free_devdata(dd); in hfi1_init_dd()
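
Near the end of hfi1_init_dd(), the serial number and OUI are carved out of the 64-bit base GUID with plain shifts and masks: the OUI is the top three bytes, and the serial folds four higher bits down onto the low 24 bits. The bit manipulation as a standalone sketch:

    #include <stdint.h>

    static void guid_oui(uint64_t guid, uint8_t oui[3])
    {
        oui[0] = (uint8_t)(guid >> 56);      /* dd->oui1 */
        oui[1] = (uint8_t)(guid >> 48);      /* dd->oui2 */
        oui[2] = (uint8_t)(guid >> 40);      /* dd->oui3 */
    }

    static uint32_t guid_serial(uint64_t guid)
    {
        /* low 24 bits plus 4 bits folded down, as in the driver */
        return (uint32_t)((guid & 0xffffff) | ((guid >> 11) & 0xf000000));
    }
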
15385 dd_dev_err((dd), \
15399 static int thermal_init(struct hfi1_devdata *dd) in thermal_init() argument
15403 if (dd->icode != ICODE_RTL_SILICON || in thermal_init()
15404 check_chip_resource(dd, CR_THERM_INIT, NULL)) in thermal_init()
15407 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); in thermal_init()
15409 THERM_FAILURE(dd, ret, "Acquire SBus"); in thermal_init()
15413 dd_dev_info(dd, "Initializing thermal sensor\n"); in thermal_init()
15415 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0); in thermal_init()
15419 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, in thermal_init()
15422 THERM_FAILURE(dd, ret, "Bus Reset"); in thermal_init()
15426 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, in thermal_init()
15429 THERM_FAILURE(dd, ret, "Therm Block Reset"); in thermal_init()
15433 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1, in thermal_init()
15436 THERM_FAILURE(dd, ret, "Write Clock Div"); in thermal_init()
15440 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3, in thermal_init()
15444 THERM_FAILURE(dd, ret, "Write Mode Sel"); in thermal_init()
15448 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, in thermal_init()
15451 THERM_FAILURE(dd, ret, "Write Reset Deassert"); in thermal_init()
15458 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1); in thermal_init()
15461 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0); in thermal_init()
15463 THERM_FAILURE(dd, ret, "Unable to set thermal init flag"); in thermal_init()
15466 release_chip_resource(dd, CR_SBUS); in thermal_init()
15470 static void handle_temp_err(struct hfi1_devdata *dd) in handle_temp_err() argument
15472 struct hfi1_pportdata *ppd = &dd->pport[0]; in handle_temp_err()
15478 dd_dev_emerg(dd, in handle_temp_err()
15480 dd->flags |= HFI1_FORCED_FREEZE; in handle_temp_err()
15495 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) | in handle_temp_err()
15501 dc_shutdown(dd); in handle_temp_err()