// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2017 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas_v3_hw"

/* global registers need init */
#define DLVRY_QUEUE_ENABLE	0x0
#define IOST_BASE_ADDR_LO	0x8
#define IOST_BASE_ADDR_HI	0xc
#define ITCT_BASE_ADDR_LO	0x10
#define ITCT_BASE_ADDR_HI	0x14
#define IO_BROKEN_MSG_ADDR_LO	0x18
#define IO_BROKEN_MSG_ADDR_HI	0x1c
#define PHY_CONTEXT		0x20
#define PHY_STATE		0x24
#define PHY_PORT_NUM_MA		0x28
#define PHY_CONN_RATE		0x30
#define ITCT_CLR		0x44
#define ITCT_CLR_EN_OFF		16
#define ITCT_CLR_EN_MSK		(0x1 << ITCT_CLR_EN_OFF)
#define ITCT_DEV_OFF		0
#define ITCT_DEV_MSK		(0x7ff << ITCT_DEV_OFF)
#define SAS_AXI_USER3		0x50
#define IO_SATA_BROKEN_MSG_ADDR_LO	0x58
#define IO_SATA_BROKEN_MSG_ADDR_HI	0x5c
#define SATA_INITI_D2H_STORE_ADDR_LO	0x60
#define SATA_INITI_D2H_STORE_ADDR_HI	0x64
#define CFG_MAX_TAG		0x68
#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL	0x84
#define HGC_SAS_TXFAIL_RETRY_CTRL	0x88
#define HGC_GET_ITV_TIME	0x90
#define DEVICE_MSG_WORK_MODE	0x94
#define OPENA_WT_CONTI_TIME	0x9c
#define I_T_NEXUS_LOSS_TIME	0xa0
#define MAX_CON_TIME_LIMIT_TIME	0xa4
#define BUS_INACTIVE_LIMIT_TIME	0xa8
#define REJECT_TO_OPEN_LIMIT_TIME	0xac
#define CQ_INT_CONVERGE_EN	0xb0
#define CFG_AGING_TIME		0xbc
#define HGC_DFX_CFG2		0xc0
#define CFG_ABT_SET_QUERY_IPTT	0xd4
#define CFG_SET_ABORTED_IPTT_OFF	0
#define CFG_SET_ABORTED_IPTT_MSK	(0xfff << CFG_SET_ABORTED_IPTT_OFF)
#define CFG_SET_ABORTED_EN_OFF	12
#define CFG_ABT_SET_IPTT_DONE	0xd8
#define CFG_ABT_SET_IPTT_DONE_OFF	0
#define HGC_IOMB_PROC1_STATUS	0x104
#define HGC_LM_DFX_STATUS2	0x128
#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF	0
#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK	(0xfff << \
					 HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF	12
#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK	(0x7ff << \
					 HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
#define HGC_CQE_ECC_ADDR	0x13c
#define HGC_CQE_ECC_1B_ADDR_OFF	0
#define HGC_CQE_ECC_1B_ADDR_MSK	(0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
#define HGC_CQE_ECC_MB_ADDR_OFF	8
#define HGC_CQE_ECC_MB_ADDR_MSK	(0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
#define HGC_IOST_ECC_ADDR	0x140
#define HGC_IOST_ECC_1B_ADDR_OFF	0
#define HGC_IOST_ECC_1B_ADDR_MSK	(0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
#define HGC_IOST_ECC_MB_ADDR_OFF	16
#define HGC_IOST_ECC_MB_ADDR_MSK	(0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
#define HGC_DQE_ECC_ADDR	0x144
#define HGC_DQE_ECC_1B_ADDR_OFF	0
#define HGC_DQE_ECC_1B_ADDR_MSK	(0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
#define HGC_DQE_ECC_MB_ADDR_OFF	16
#define HGC_DQE_ECC_MB_ADDR_MSK	(0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define CHNL_INT_STATUS		0x148
#define TAB_DFX			0x14c
#define HGC_ITCT_ECC_ADDR	0x150
#define HGC_ITCT_ECC_1B_ADDR_OFF	0
#define HGC_ITCT_ECC_1B_ADDR_MSK	(0x3ff << \
					 HGC_ITCT_ECC_1B_ADDR_OFF)
#define HGC_ITCT_ECC_MB_ADDR_OFF	16
#define HGC_ITCT_ECC_MB_ADDR_MSK	(0x3ff << \
					 HGC_ITCT_ECC_MB_ADDR_OFF)
#define HGC_AXI_FIFO_ERR_INFO	0x154
#define AXI_ERR_INFO_OFF	0
#define AXI_ERR_INFO_MSK	(0xff << AXI_ERR_INFO_OFF)
#define FIFO_ERR_INFO_OFF	8
#define FIFO_ERR_INFO_MSK	(0xff << FIFO_ERR_INFO_OFF)
#define TAB_RD_TYPE		0x15c
#define INT_COAL_EN		0x19c
#define OQ_INT_COAL_TIME	0x1a0
#define OQ_INT_COAL_CNT		0x1a4
#define ENT_INT_COAL_TIME	0x1a8
#define ENT_INT_COAL_CNT	0x1ac
#define OQ_INT_SRC		0x1b0
#define OQ_INT_SRC_MSK		0x1b4
#define ENT_INT_SRC1		0x1b8
#define ENT_INT_SRC1_D2H_FIS_CH0_OFF	0
#define ENT_INT_SRC1_D2H_FIS_CH0_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
#define ENT_INT_SRC1_D2H_FIS_CH1_OFF	8
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2		0x1bc
#define ENT_INT_SRC3		0x1c0
#define ENT_INT_SRC3_WP_DEPTH_OFF	8
#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF	9
#define ENT_INT_SRC3_RP_DEPTH_OFF	10
#define ENT_INT_SRC3_AXI_OFF	11
#define ENT_INT_SRC3_FIFO_OFF	12
#define ENT_INT_SRC3_LM_OFF	14
#define ENT_INT_SRC3_ITC_INT_OFF	15
#define ENT_INT_SRC3_ITC_INT_MSK	(0x1 << ENT_INT_SRC3_ITC_INT_OFF)
#define ENT_INT_SRC3_ABT_OFF	16
#define ENT_INT_SRC3_DQE_POISON_OFF	18
#define ENT_INT_SRC3_IOST_POISON_OFF	19
#define ENT_INT_SRC3_ITCT_POISON_OFF	20
#define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF	21
#define ENT_INT_SRC_MSK1	0x1c4
#define ENT_INT_SRC_MSK2	0x1c8
#define ENT_INT_SRC_MSK3	0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF	31
#define CHNL_PHYUPDOWN_INT_MSK	0x1d0
#define CHNL_ENT_INT_MSK	0x1d4
#define HGC_COM_INT_MSK		0x1d8
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK	(0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
#define SAS_ECC_INTR		0x1e8
#define SAS_ECC_INTR_DQE_ECC_1B_OFF	0
#define SAS_ECC_INTR_DQE_ECC_MB_OFF	1
#define SAS_ECC_INTR_IOST_ECC_1B_OFF	2
#define SAS_ECC_INTR_IOST_ECC_MB_OFF	3
#define SAS_ECC_INTR_ITCT_ECC_1B_OFF	4
#define SAS_ECC_INTR_ITCT_ECC_MB_OFF	5
#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF	6
#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF	7
#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF	8
#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF	9
#define SAS_ECC_INTR_CQE_ECC_1B_OFF	10
#define SAS_ECC_INTR_CQE_ECC_MB_OFF	11
#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF	12
#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF	13
#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF	14
#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF	15
#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF	16
#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF	17
#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF	18
#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF	19
#define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF	20
#define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF	21
#define SAS_ECC_INTR_MSK	0x1ec
#define HGC_ERR_STAT_EN		0x238
#define CQE_SEND_CNT		0x248
#define DLVRY_Q_0_BASE_ADDR_LO	0x260
#define DLVRY_Q_0_BASE_ADDR_HI	0x264
#define DLVRY_Q_0_DEPTH		0x268
#define DLVRY_Q_0_WR_PTR	0x26c
#define DLVRY_Q_0_RD_PTR	0x270
#define HYPER_STREAM_ID_EN_CFG	0xc80
#define OQ0_INT_SRC_MSK		0xc90
#define COMPL_Q_0_BASE_ADDR_LO	0x4e0
#define COMPL_Q_0_BASE_ADDR_HI	0x4e4
#define COMPL_Q_0_DEPTH		0x4e8
#define COMPL_Q_0_WR_PTR	0x4ec
#define COMPL_Q_0_RD_PTR	0x4f0
#define HGC_RXM_DFX_STATUS14	0xae8
#define HGC_RXM_DFX_STATUS14_MEM0_OFF	0
#define HGC_RXM_DFX_STATUS14_MEM0_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS14_MEM0_OFF)
#define HGC_RXM_DFX_STATUS14_MEM1_OFF	9
#define HGC_RXM_DFX_STATUS14_MEM1_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS14_MEM1_OFF)
#define HGC_RXM_DFX_STATUS14_MEM2_OFF	18
#define HGC_RXM_DFX_STATUS14_MEM2_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS14_MEM2_OFF)
#define HGC_RXM_DFX_STATUS15	0xaec
#define HGC_RXM_DFX_STATUS15_MEM3_OFF	0
#define HGC_RXM_DFX_STATUS15_MEM3_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS15_MEM3_OFF)
#define AWQOS_AWCACHE_CFG	0xc84
#define ARQOS_ARCACHE_CFG	0xc88
#define HILINK_ERR_DFX		0xe04
#define SAS_GPIO_CFG_0		0x1000
#define SAS_GPIO_CFG_1		0x1004
#define SAS_GPIO_TX_0_1		0x1040
#define SAS_CFG_DRIVE_VLD	0x1070

/* phy registers requiring init */
#define PORT_BASE		(0x2000)
#define PHY_CFG			(PORT_BASE + 0x0)
#define HARD_PHY_LINKRATE	(PORT_BASE + 0x4)
#define PHY_CFG_ENA_OFF		0
#define PHY_CFG_ENA_MSK		(0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF	2
#define PHY_CFG_DC_OPT_MSK	(0x1 << PHY_CFG_DC_OPT_OFF)
#define PHY_CFG_PHY_RST_OFF	3
#define PHY_CFG_PHY_RST_MSK	(0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE	(PORT_BASE + 0x8)
#define CFG_PROG_PHY_LINK_RATE_OFF	0
#define CFG_PROG_PHY_LINK_RATE_MSK	(0xff << CFG_PROG_PHY_LINK_RATE_OFF)
#define CFG_PROG_OOB_PHY_LINK_RATE_OFF	8
#define CFG_PROG_OOB_PHY_LINK_RATE_MSK	(0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF)
#define PHY_CTRL		(PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF	0
#define PHY_CTRL_RESET_MSK	(0x1 << PHY_CTRL_RESET_OFF)
#define CMD_HDR_PIR_OFF		8
#define CMD_HDR_PIR_MSK		(0x1 << CMD_HDR_PIR_OFF)
#define SERDES_CFG		(PORT_BASE + 0x1c)
#define CFG_ALOS_CHK_DISABLE_OFF	9
#define CFG_ALOS_CHK_DISABLE_MSK	(0x1 << CFG_ALOS_CHK_DISABLE_OFF)
#define SAS_PHY_BIST_CTRL	(PORT_BASE + 0x2c)
#define CFG_BIST_MODE_SEL_OFF	0
#define CFG_BIST_MODE_SEL_MSK	(0xf << CFG_BIST_MODE_SEL_OFF)
#define CFG_LOOP_TEST_MODE_OFF	14
#define CFG_LOOP_TEST_MODE_MSK	(0x3 << CFG_LOOP_TEST_MODE_OFF)
#define CFG_RX_BIST_EN_OFF	16
#define CFG_RX_BIST_EN_MSK	(0x1 << CFG_RX_BIST_EN_OFF)
#define CFG_TX_BIST_EN_OFF	17
#define CFG_TX_BIST_EN_MSK	(0x1 << CFG_TX_BIST_EN_OFF)
#define CFG_BIST_TEST_OFF	18
#define CFG_BIST_TEST_MSK	(0x1 << CFG_BIST_TEST_OFF)
#define SAS_PHY_BIST_CODE	(PORT_BASE + 0x30)
#define SAS_PHY_BIST_CODE1	(PORT_BASE + 0x34)
#define SAS_BIST_ERR_CNT	(PORT_BASE + 0x38)
#define SL_CFG			(PORT_BASE + 0x84)
#define AIP_LIMIT		(PORT_BASE + 0x90)
#define SL_CONTROL		(PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF	0
#define SL_CONTROL_NOTIFY_EN_MSK	(0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CTA_OFF		17
#define SL_CTA_MSK		(0x1 << SL_CTA_OFF)
#define RX_PRIMS_STATUS		(PORT_BASE + 0x98)
#define RX_BCAST_CHG_OFF	1
#define RX_BCAST_CHG_MSK	(0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0		(PORT_BASE + 0x9c)
#define TX_ID_DWORD1		(PORT_BASE + 0xa0)
#define TX_ID_DWORD2		(PORT_BASE + 0xa4)
#define TX_ID_DWORD3		(PORT_BASE + 0xa8)
#define TX_ID_DWORD4		(PORT_BASE + 0xaC)
#define TX_ID_DWORD5		(PORT_BASE + 0xb0)
#define TX_ID_DWORD6		(PORT_BASE + 0xb4)
#define TXID_AUTO		(PORT_BASE + 0xb8)
#define CT3_OFF			1
#define CT3_MSK			(0x1 << CT3_OFF)
#define TX_HARDRST_OFF		2
#define TX_HARDRST_MSK		(0x1 << TX_HARDRST_OFF)
#define RX_IDAF_DWORD0		(PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H	(PORT_BASE + 0xfc)
#define STP_LINK_TIMER		(PORT_BASE + 0x120)
#define STP_LINK_TIMEOUT_STATE	(PORT_BASE + 0x124)
#define CON_CFG_DRIVER		(PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG	(PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG	(PORT_BASE + 0x138)
#define SAS_STP_CON_TIMER_CFG	(PORT_BASE + 0x13c)
#define CHL_INT0		(PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF	0
#define CHL_INT0_HOTPLUG_TOUT_MSK	(0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
#define CHL_INT0_SL_RX_BCST_ACK_OFF	1
#define CHL_INT0_SL_RX_BCST_ACK_MSK	(0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
#define CHL_INT0_SL_PHY_ENABLE_OFF	2
#define CHL_INT0_SL_PHY_ENABLE_MSK	(0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
#define CHL_INT0_NOT_RDY_OFF	4
#define CHL_INT0_NOT_RDY_MSK	(0x1 << CHL_INT0_NOT_RDY_OFF)
#define CHL_INT0_PHY_RDY_OFF	5
#define CHL_INT0_PHY_RDY_MSK	(0x1 << CHL_INT0_PHY_RDY_OFF)
#define CHL_INT1		(PORT_BASE + 0x1b8)
#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF	15
#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF	16
#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF	17
#define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF	18
#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF	19
#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF	20
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF	21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF	22
#define CHL_INT1_DMAC_TX_FIFO_ERR_OFF	23
#define CHL_INT1_DMAC_RX_FIFO_ERR_OFF	24
#define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF	26
#define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF	27
#define CHL_INT2		(PORT_BASE + 0x1bc)
#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF	0
#define CHL_INT2_RX_DISP_ERR_OFF	28
#define CHL_INT2_RX_CODE_ERR_OFF	29
#define CHL_INT2_RX_INVLD_DW_OFF	30
#define CHL_INT2_STP_LINK_TIMEOUT_OFF	31
#define CHL_INT0_MSK		(PORT_BASE + 0x1c0)
#define CHL_INT1_MSK		(PORT_BASE + 0x1c4)
#define CHL_INT2_MSK		(PORT_BASE + 0x1c8)
#define SAS_EC_INT_COAL_TIME	(PORT_BASE + 0x1cc)
#define CHL_INT_COAL_EN		(PORT_BASE + 0x1d0)
#define SAS_RX_TRAIN_TIMER	(PORT_BASE + 0x2a4)
#define PHY_CTRL_RDY_MSK	(PORT_BASE + 0x2b0)
#define PHYCTRL_NOT_RDY_MSK	(PORT_BASE + 0x2b4)
#define PHYCTRL_DWS_RESET_MSK	(PORT_BASE + 0x2b8)
#define PHYCTRL_PHY_ENA_MSK	(PORT_BASE + 0x2bc)
#define SL_RX_BCAST_CHK_MSK	(PORT_BASE + 0x2c0)
#define PHYCTRL_OOB_RESTART_MSK	(PORT_BASE + 0x2c4)
#define DMA_TX_STATUS		(PORT_BASE + 0x2d0)
#define DMA_TX_STATUS_BUSY_OFF	0
#define DMA_TX_STATUS_BUSY_MSK	(0x1 << DMA_TX_STATUS_BUSY_OFF)
#define DMA_RX_STATUS		(PORT_BASE + 0x2e8)
#define DMA_RX_STATUS_BUSY_OFF	0
#define DMA_RX_STATUS_BUSY_MSK	(0x1 << DMA_RX_STATUS_BUSY_OFF)

#define COARSETUNE_TIME		(PORT_BASE + 0x304)
#define TXDEEMPH_G1		(PORT_BASE + 0x350)
#define ERR_CNT_DWS_LOST	(PORT_BASE + 0x380)
#define ERR_CNT_RESET_PROB	(PORT_BASE + 0x384)
#define ERR_CNT_INVLD_DW	(PORT_BASE + 0x390)
#define ERR_CNT_CODE_ERR	(PORT_BASE + 0x394)
#define ERR_CNT_DISP_ERR	(PORT_BASE + 0x398)

#define DEFAULT_ITCT_HW		2048 /* reset value, not reprogrammed */
#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
#error Max ITCT exceeded
#endif

#define AXI_MASTER_CFG_BASE	(0x5000)
#define AM_CTRL_GLOBAL		(0x0)
#define AM_CTRL_SHUTDOWN_REQ_OFF	0
#define AM_CTRL_SHUTDOWN_REQ_MSK	(0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
#define AM_CURR_TRANS_RETURN	(0x150)

#define AM_CFG_MAX_TRANS	(0x5010)
#define AM_CFG_SINGLE_PORT_MAX_TRANS	(0x5014)
#define AXI_CFG			(0x5100)
#define AM_ROB_ECC_ERR_ADDR	(0x510c)
#define AM_ROB_ECC_ERR_ADDR_OFF	0
#define AM_ROB_ECC_ERR_ADDR_MSK	0xffffffff

/* RAS registers need init */
#define RAS_BASE		(0x6000)
#define SAS_RAS_INTR0		(RAS_BASE)
#define SAS_RAS_INTR1		(RAS_BASE + 0x04)
#define SAS_RAS_INTR0_MASK	(RAS_BASE + 0x08)
#define SAS_RAS_INTR1_MASK	(RAS_BASE + 0x0c)
#define CFG_SAS_RAS_INTR_MASK	(RAS_BASE + 0x1c)
#define SAS_RAS_INTR2		(RAS_BASE + 0x20)
#define SAS_RAS_INTR2_MASK	(RAS_BASE + 0x24)

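/*
 * The bit-field definitions that follow describe the DMA structures shared
 * with the v3 hardware: the delivery queue command header (filled dword by
 * dword in the prep_ssp/prep_smp/prep_ata/prep_abort helpers below), the
 * completion queue entry, and the per-device ITCT context.
 */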
/* HW dma structures */
/* Delivery queue header */
/* dw0 */
#define CMD_HDR_ABORT_FLAG_OFF	0
#define CMD_HDR_ABORT_FLAG_MSK	(0x3 << CMD_HDR_ABORT_FLAG_OFF)
#define CMD_HDR_ABORT_DEVICE_TYPE_OFF	2
#define CMD_HDR_ABORT_DEVICE_TYPE_MSK	(0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
#define CMD_HDR_RESP_REPORT_OFF	5
#define CMD_HDR_RESP_REPORT_MSK	(0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF	6
#define CMD_HDR_TLR_CTRL_MSK	(0x3 << CMD_HDR_TLR_CTRL_OFF)
#define CMD_HDR_PORT_OFF	18
#define CMD_HDR_PORT_MSK	(0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF	27
#define CMD_HDR_PRIORITY_MSK	(0x1 << CMD_HDR_PRIORITY_OFF)
#define CMD_HDR_CMD_OFF		29
#define CMD_HDR_CMD_MSK		(0x7 << CMD_HDR_CMD_OFF)
/* dw1 */
#define CMD_HDR_UNCON_CMD_OFF	3
#define CMD_HDR_DIR_OFF		5
#define CMD_HDR_DIR_MSK		(0x3 << CMD_HDR_DIR_OFF)
#define CMD_HDR_RESET_OFF	7
#define CMD_HDR_RESET_MSK	(0x1 << CMD_HDR_RESET_OFF)
#define CMD_HDR_VDTL_OFF	10
#define CMD_HDR_VDTL_MSK	(0x1 << CMD_HDR_VDTL_OFF)
#define CMD_HDR_FRAME_TYPE_OFF	11
#define CMD_HDR_FRAME_TYPE_MSK	(0x1f << CMD_HDR_FRAME_TYPE_OFF)
#define CMD_HDR_DEV_ID_OFF	16
#define CMD_HDR_DEV_ID_MSK	(0xffff << CMD_HDR_DEV_ID_OFF)
/* dw2 */
#define CMD_HDR_CFL_OFF		0
#define CMD_HDR_CFL_MSK		(0x1ff << CMD_HDR_CFL_OFF)
#define CMD_HDR_NCQ_TAG_OFF	10
#define CMD_HDR_NCQ_TAG_MSK	(0x1f << CMD_HDR_NCQ_TAG_OFF)
#define CMD_HDR_MRFL_OFF	15
#define CMD_HDR_MRFL_MSK	(0x1ff << CMD_HDR_MRFL_OFF)
#define CMD_HDR_SG_MOD_OFF	24
#define CMD_HDR_SG_MOD_MSK	(0x3 << CMD_HDR_SG_MOD_OFF)
/* dw3 */
#define CMD_HDR_IPTT_OFF	0
#define CMD_HDR_IPTT_MSK	(0xffff << CMD_HDR_IPTT_OFF)
/* dw6 */
#define CMD_HDR_DIF_SGL_LEN_OFF	0
#define CMD_HDR_DIF_SGL_LEN_MSK	(0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
#define CMD_HDR_DATA_SGL_LEN_OFF	16
#define CMD_HDR_DATA_SGL_LEN_MSK	(0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
/* dw7 */
#define CMD_HDR_ADDR_MODE_SEL_OFF	15
#define CMD_HDR_ADDR_MODE_SEL_MSK	(1 << CMD_HDR_ADDR_MODE_SEL_OFF)
#define CMD_HDR_ABORT_IPTT_OFF	16
#define CMD_HDR_ABORT_IPTT_MSK	(0xffff << CMD_HDR_ABORT_IPTT_OFF)

/* Completion header */
/* dw0 */
#define CMPLT_HDR_CMPLT_OFF	0
#define CMPLT_HDR_CMPLT_MSK	(0x3 << CMPLT_HDR_CMPLT_OFF)
#define CMPLT_HDR_ERROR_PHASE_OFF	2
#define CMPLT_HDR_ERROR_PHASE_MSK	(0xff << CMPLT_HDR_ERROR_PHASE_OFF)
#define CMPLT_HDR_RSPNS_XFRD_OFF	10
#define CMPLT_HDR_RSPNS_XFRD_MSK	(0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
#define CMPLT_HDR_ERX_OFF	12
#define CMPLT_HDR_ERX_MSK	(0x1 << CMPLT_HDR_ERX_OFF)
#define CMPLT_HDR_ABORT_STAT_OFF	13
#define CMPLT_HDR_ABORT_STAT_MSK	(0x7 << CMPLT_HDR_ABORT_STAT_OFF)
/* abort_stat */
#define STAT_IO_NOT_VALID	0x1
#define STAT_IO_NO_DEVICE	0x2
#define STAT_IO_COMPLETE	0x3
#define STAT_IO_ABORTED		0x4
/* dw1 */
#define CMPLT_HDR_IPTT_OFF	0
#define CMPLT_HDR_IPTT_MSK	(0xffff << CMPLT_HDR_IPTT_OFF)
#define CMPLT_HDR_DEV_ID_OFF	16
#define CMPLT_HDR_DEV_ID_MSK	(0xffff << CMPLT_HDR_DEV_ID_OFF)
/* dw3 */
#define CMPLT_HDR_IO_IN_TARGET_OFF	17
#define CMPLT_HDR_IO_IN_TARGET_MSK	(0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)

/* ITCT header */
/* qw0 */
#define ITCT_HDR_DEV_TYPE_OFF	0
#define ITCT_HDR_DEV_TYPE_MSK	(0x3 << ITCT_HDR_DEV_TYPE_OFF)
#define ITCT_HDR_VALID_OFF	2
#define ITCT_HDR_VALID_MSK	(0x1 << ITCT_HDR_VALID_OFF)
#define ITCT_HDR_MCR_OFF	5
#define ITCT_HDR_MCR_MSK	(0xf << ITCT_HDR_MCR_OFF)
#define ITCT_HDR_VLN_OFF	9
#define ITCT_HDR_VLN_MSK	(0xf << ITCT_HDR_VLN_OFF)
#define ITCT_HDR_SMP_TIMEOUT_OFF	16
#define ITCT_HDR_AWT_CONTINUE_OFF	25
#define ITCT_HDR_PORT_ID_OFF	28
#define ITCT_HDR_PORT_ID_MSK	(0xf << ITCT_HDR_PORT_ID_OFF)
/* qw2 */
#define ITCT_HDR_INLT_OFF	0
#define ITCT_HDR_INLT_MSK	(0xffffULL << ITCT_HDR_INLT_OFF)
#define ITCT_HDR_RTOLT_OFF	48
#define ITCT_HDR_RTOLT_MSK	(0xffffULL << ITCT_HDR_RTOLT_OFF)

struct hisi_sas_protect_iu_v3_hw {
	u32 dw0;
	u32 lbrtcv;
	u32 lbrtgv;
	u32 dw3;
	u32 dw4;
	u32 dw5;
	u32 rsv;
};

struct hisi_sas_complete_v3_hdr {
	__le32 dw0;
	__le32 dw1;
	__le32 act;
	__le32 dw3;
};

struct hisi_sas_err_record_v3 {
	/* dw0 */
	__le32 trans_tx_fail_type;

	/* dw1 */
	__le32 trans_rx_fail_type;

	/* dw2 */
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;

	/* dw3 */
	__le32 dma_rx_err_type;
};

#define RX_DATA_LEN_UNDERFLOW_OFF	6
#define RX_DATA_LEN_UNDERFLOW_MSK	(1 << RX_DATA_LEN_UNDERFLOW_OFF)

#define HISI_SAS_COMMAND_ENTRIES_V3_HW	4096
#define HISI_SAS_MSI_COUNT_V3_HW	32

#define DIR_NO_DATA		0
#define DIR_TO_INI		1
#define DIR_TO_DEVICE		2
#define DIR_RESERVED		3

#define FIS_CMD_IS_UNCONSTRAINED(fis) \
	((fis.command == ATA_CMD_READ_LOG_EXT) || \
	(fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
	((fis.command == ATA_CMD_DEV_RESET) && \
	((fis.control & ATA_SRST) != 0)))

#define T10_INSRT_EN_OFF	0
#define T10_INSRT_EN_MSK	(1 << T10_INSRT_EN_OFF)
#define T10_RMV_EN_OFF		1
#define T10_RMV_EN_MSK		(1 << T10_RMV_EN_OFF)
#define T10_RPLC_EN_OFF		2
#define T10_RPLC_EN_MSK		(1 << T10_RPLC_EN_OFF)
#define T10_CHK_EN_OFF		3
#define T10_CHK_EN_MSK		(1 << T10_CHK_EN_OFF)
#define INCR_LBRT_OFF		5
#define INCR_LBRT_MSK		(1 << INCR_LBRT_OFF)
#define USR_DATA_BLOCK_SZ_OFF	20
#define USR_DATA_BLOCK_SZ_MSK	(0x3 << USR_DATA_BLOCK_SZ_OFF)
#define T10_CHK_MSK_OFF		16
#define T10_CHK_REF_TAG_MSK	(0xf0 << T10_CHK_MSK_OFF)
#define T10_CHK_APP_TAG_MSK	(0xc << T10_CHK_MSK_OFF)

#define BASE_VECTORS_V3_HW	16
#define MIN_AFFINE_VECTORS_V3_HW	(BASE_VECTORS_V3_HW + 1)

#define CHNL_INT_STS_MSK	0xeeeeeeee
#define CHNL_INT_STS_PHY_MSK	0xe
#define CHNL_INT_STS_INT0_MSK	BIT(1)
#define CHNL_INT_STS_INT1_MSK	BIT(2)
#define CHNL_INT_STS_INT2_MSK	BIT(3)
#define CHNL_WIDTH		4

enum {
	DSM_FUNC_ERR_HANDLE_MSI = 0,
};

static bool hisi_sas_intr_conv;
MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");

/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");

static bool auto_affine_msi_experimental;
module_param(auto_affine_msi_experimental, bool, 0444);
MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs as experimental:\n"
		 "default is off");

static void debugfs_work_handler_v3_hw(struct work_struct *work);

static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
	void __iomem *regs = hisi_hba->regs + off;

	return readl(regs);
}

static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + off;

	writel(val, regs);
}

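/*
 * Per-phy registers (the PORT_BASE + ... definitions above) repeat at a
 * 0x400-byte stride per phy; the helpers below add the per-phy offset to
 * the register address before accessing it.
 */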
static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
				 u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	writel(val, regs);
}

static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
			       int phy_no, u32 off)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	return readl(regs);
}

#define hisi_sas_read32_poll_timeout(off, val, cond, delay_us, \
				     timeout_us) \
({ \
	void __iomem *regs = hisi_hba->regs + off; \
	readl_poll_timeout(regs, val, cond, delay_us, timeout_us); \
})

#define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us, \
					    timeout_us) \
({ \
	void __iomem *regs = hisi_hba->regs + off; \
	readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us); \
})

static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int i, j;

	/* Global registers init */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN,
			 hisi_sas_intr_conv);
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);

	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		enum sas_linkrate max;
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, i,
							     PROG_PHY_LINK_RATE);

		prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
		if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
				      SAS_LINK_RATE_1_5_GBPS))
			max = SAS_LINK_RATE_12_0_GBPS;
		else
			max = sas_phy->phy->maximum_linkrate;
		prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
				     prog_phy_link_rate);
		hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
		hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
		hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
		hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
				     0x30f4240);
		/* used for 12G negotiate */
		hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
		hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);

		/* get default FFE configuration for BIST */
		for (j = 0; j < FFE_CFG_MAX; j++) {
			u32 val = hisi_sas_phy_read32(hisi_hba, i,
						      TXDEEMPH_G1 + (j * 0x4));
			hisi_hba->debugfs_bist_ffe[i][j] = val;
		}
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));

	/* RAS registers init */
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
	hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);

	/* LED registers init */
	hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff);
	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080);
	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080);
	/* Configure blink generator rate A to 1Hz and B to 4Hz */
	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700);
	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000);
}

static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg &= ~PHY_CFG_DC_OPT_MSK;
	cfg |= 1 << PHY_CFG_DC_OPT_OFF;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct sas_identify_frame identify_frame;
	u32 *identify_buffer;

	memset(&identify_frame, 0, sizeof(identify_frame));
	identify_frame.dev_type = SAS_END_DEVICE;
	identify_frame.frame_type = 0;
	identify_frame._un1 = 1;
	identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
	identify_frame.target_bits = SAS_PROTOCOL_NONE;
	memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	identify_frame.phy_id = phy_no;
	identify_buffer = (u32 *)(&identify_frame);

	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
			     __swab32(identify_buffer[0]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
			     __swab32(identify_buffer[1]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
			     __swab32(identify_buffer[2]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
			     __swab32(identify_buffer[3]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
			     __swab32(identify_buffer[4]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
			     __swab32(identify_buffer[5]));
}

static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_device *sas_dev)
{
	struct domain_device *device = sas_dev->sas_device;
	struct device *dev = hisi_hba->dev;
	u64 qw0, device_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
	struct domain_device *parent_dev = device->parent;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u64 sas_addr;

	memset(itct, 0, sizeof(*itct));

	/* qw0 */
	qw0 = 0;
	switch (sas_dev->dev_type) {
	case SAS_END_DEVICE:
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PENDING:
		if (parent_dev && dev_is_expander(parent_dev->dev_type))
			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
		else
			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
		break;
	default:
		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
			 sas_dev->dev_type);
	}

	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
		(device->linkrate << ITCT_HDR_MCR_OFF) |
		(1 << ITCT_HDR_VLN_OFF) |
		(0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
		(1 << ITCT_HDR_AWT_CONTINUE_OFF) |
		(port->id << ITCT_HDR_PORT_ID_OFF));
	itct->qw0 = cpu_to_le64(qw0);

	/* qw1 */
	memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
	itct->sas_addr = cpu_to_le64(__swab64(sas_addr));

	/* qw2 */
	if (!dev_is_sata(device))
		itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
					(0x1ULL << ITCT_HDR_RTOLT_OFF));
}

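/*
 * Invalidate a device's ITCT entry: kick ITCT_CLR for the device id, then
 * wait for the controller to acknowledge the clear (signalled through the
 * ITC interrupt completion) before zeroing the in-memory copy.
 */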
static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
			    struct hisi_sas_device *sas_dev)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	u64 dev_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	struct device *dev = hisi_hba->dev;

	sas_dev->completion = &completion;

	/* clear the itct interrupt state */
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

	/* clear the itct table */
	reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
	hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

	if (!wait_for_completion_timeout(sas_dev->completion,
					 CLEAR_ITCT_TIMEOUT * HZ)) {
		dev_warn(dev, "failed to clear ITCT\n");
		return -ETIMEDOUT;
	}

	memset(itct, 0, sizeof(struct hisi_sas_itct));
	return 0;
}

static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
			       struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	u32 cfg_abt_set_query_iptt;

	cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
		CFG_ABT_SET_QUERY_IPTT);
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
		cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
		cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
			(slot->idx << CFG_SET_ABORTED_IPTT_OFF);
		hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
			cfg_abt_set_query_iptt);
	}
	cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
		cfg_abt_set_query_iptt);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
			 1 << CFG_ABT_SET_IPTT_DONE_OFF);
}

static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int ret;
	u32 val;

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	/* Disable all of the PHYs */
	hisi_sas_stop_phys(hisi_hba);
	udelay(50);

	/* Ensure axi bus idle */
	ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
					   20000, 1000000);
	if (ret) {
		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
		return -EIO;
	}

	if (ACPI_HANDLE(dev)) {
		acpi_status s;

		s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
		if (ACPI_FAILURE(s)) {
			dev_err(dev, "Reset failed\n");
			return -EIO;
		}
	} else {
		dev_err(dev, "no reset method!\n");
		return -EINVAL;
	}

	return 0;
}

static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct acpi_device *acpi_dev;
	union acpi_object *obj;
	guid_t guid;
	int rc;

	rc = reset_hw_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
		return rc;
	}

	msleep(100);
	init_reg_v3_hw(hisi_hba);

	if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
		dev_err(dev, "Parse GUID failed\n");
		return -EINVAL;
	}

	/*
	 * This DSM handles some hardware-related configurations:
	 * 1. Switch over to MSI error handling in kernel
	 * 2. BIOS *may* reset some register values through this method
	 */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
				DSM_FUNC_ERR_HANDLE_MSI, NULL);
	if (!obj)
		dev_warn(dev, "can not find DSM method, ignore\n");
	else
		ACPI_FREE(obj);

	acpi_dev = ACPI_COMPANION(dev);
	if (!acpi_device_power_manageable(acpi_dev))
		dev_notice(dev, "neither _PS0 nor _PR0 is defined\n");
	return 0;
}

static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg |= PHY_CFG_ENA_MSK;
	cfg &= ~PHY_CFG_PHY_RST_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
			       BIT(CHL_INT2_RX_CODE_ERR_OFF) |
			       BIT(CHL_INT2_RX_INVLD_DW_OFF);
	u32 state;

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk);

	cfg &= ~PHY_CFG_ENA_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);

	mdelay(50);

	state = hisi_sas_read32(hisi_hba, PHY_STATE);
	if (state & BIT(phy_no)) {
		cfg |= PHY_CFG_PHY_RST_MSK;
		hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
	}

	udelay(1);

	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk);
}

static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}

static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 txid_auto;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	if (phy->identify.device_type == SAS_END_DEVICE) {
		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
				     txid_auto | TX_HARDRST_MSK);
	}
	msleep(100);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}

static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
{
	return SAS_LINK_RATE_12_0_GBPS;
}

static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (!sas_phy->phy->enabled)
			continue;

		hisi_sas_phy_enable(hisi_hba, i, 1);
	}
}

static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 sl_control;

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
	msleep(1);
	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}

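/*
 * Build a bitmap of the phys that belong to port_id by matching each ready
 * phy's 4-bit port number in PHY_PORT_NUM_MA.
 */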
static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
{
	int i, bitmap = 0;
	u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	for (i = 0; i < hisi_hba->n_phy; i++)
		if (phy_state & BIT(i))
			if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
				bitmap |= BIT(i);

	return bitmap;
}

static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct hisi_sas_slot *s, *s1, *s2 = NULL;
	int dlvry_queue = dq->id;
	int wp;

	list_for_each_entry_safe(s, s1, &dq->list, delivery) {
		if (!s->ready)
			break;
		s2 = s;
		list_del(&s->delivery);
	}

	if (!s2)
		return;

	/*
	 * Ensure that memories for slots built on other CPUs are observed.
	 */
	smp_rmb();
	wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;

	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}

static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
			       struct hisi_sas_slot *slot,
			       struct hisi_sas_cmd_hdr *hdr,
			       struct scatterlist *scatter,
			       int n_elem)
{
	struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
	struct scatterlist *sg;
	int i;

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));

	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
}

static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot,
				   struct hisi_sas_cmd_hdr *hdr,
				   struct scatterlist *scatter,
				   int n_elem)
{
	struct hisi_sas_sge_dif_page *sge_dif_page;
	struct scatterlist *sg;
	int i;

	sge_dif_page = hisi_sas_sge_dif_addr_mem(slot);

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_dif_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = 0;
		entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->dif_prd_table_addr =
		cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot));

	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF);
}

static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
{
	unsigned char prot_flags = scsi_cmnd->prot_flags;

	if (prot_flags & SCSI_PROT_REF_CHECK)
		return T10_CHK_APP_TAG_MSK;
	return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK;
}

static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
			    struct hisi_sas_protect_iu_v3_hw *prot)
{
	unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
	unsigned int interval = scsi_prot_interval(scsi_cmnd);
	u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);

	switch (prot_op) {
	case SCSI_PROT_READ_INSERT:
		prot->dw0 |= T10_INSRT_EN_MSK;
		prot->lbrtgv = lbrt_chk_val;
		break;
	case SCSI_PROT_READ_STRIP:
		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	case SCSI_PROT_READ_PASS:
		prot->dw0 |= T10_CHK_EN_MSK;
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	case SCSI_PROT_WRITE_INSERT:
		prot->dw0 |= T10_INSRT_EN_MSK;
		prot->lbrtgv = lbrt_chk_val;
		break;
	case SCSI_PROT_WRITE_STRIP:
		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
		prot->lbrtcv = lbrt_chk_val;
		break;
	case SCSI_PROT_WRITE_PASS:
		prot->dw0 |= T10_CHK_EN_MSK;
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	default:
		WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
		break;
	}

	switch (interval) {
	case 512:
		break;
	case 4096:
		prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
		break;
	case 520:
		prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
		break;
	default:
		WARN(1, "protection interval (0x%x) invalid\n",
		     interval);
		break;
	}

	prot->dw0 |= INCR_LBRT_MSK;
}

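/*
 * Build the delivery queue command header and command table for an SSP
 * frame: dw0 selects the SSP command type and port, dw1 the data direction
 * and ITCT device id, dw2 the frame/response lengths; the LUN, task
 * attributes, CDB and (optionally) the T10 PI protection IU are copied into
 * the slot's command buffer.
 */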
static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	struct hisi_sas_tmf_task *tmf = slot->tmf;
	int has_data = 0, priority = !!tmf;
	unsigned char prot_op;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0, len = 0;

	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (tmf) {
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		prot_op = scsi_get_prot_op(scsi_cmnd);
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
	      + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
				   slot->n_elem);

		if (scsi_prot_sg_count(scsi_cmnd))
			prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr,
					       scsi_prot_sglist(scsi_cmnd),
					       slot->n_elem_dif);
	}

	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
		sizeof(struct ssp_frame_hdr);

	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	if (!tmf) {
		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
		struct hisi_sas_protect_iu_v3_hw prot;
		u8 *buf_cmd_prot;

		hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
		dw1 |= CMD_HDR_PIR_MSK;
		buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
			       sizeof(struct ssp_frame_hdr) +
			       sizeof(struct ssp_command_iu);

		memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
		fill_prot_v3_hw(scsi_cmnd, &prot);
		memcpy(buf_cmd_prot, &prot,
		       sizeof(struct hisi_sas_protect_iu_v3_hw));
		/*
		 * For READ, we need length of info read to memory, while for
		 * WRITE we need length of data written to the disk.
		 */
		if (prot_op == SCSI_PROT_WRITE_INSERT ||
		    prot_op == SCSI_PROT_READ_INSERT ||
		    prot_op == SCSI_PROT_WRITE_PASS ||
		    prot_op == SCSI_PROT_READ_PASS) {
			unsigned int interval = scsi_prot_interval(scsi_cmnd);
			unsigned int ilog2_interval = ilog2(interval);

			len = (task->total_xfer_len >> ilog2_interval) * 8;
		}
	}

	hdr->dw1 = cpu_to_le32(dw1);

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
}

static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len;

	/* req */
	sg_req = &task->smp_task.smp_req;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2 */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
}

static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device = task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u8 *buf_cmd;
	int has_data = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	if (parent_dev && dev_is_expander(parent_dev->dev_type))
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	else
		hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);

	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
	    (task->ata_task.fis.control & ATA_SRST))
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (hisi_sas_get_ata_protocol(
		&task->ata_task.fis, task->data_dir))
		<< CMD_HDR_FRAME_TYPE_OFF;
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
		dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;

	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2 */
	if (task->ata_task.use_ncq) {
		struct ata_queued_cmd *qc = task->uldd_task;

		hdr_tag = qc->tag;
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
			2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data)
		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
				   slot->n_elem);

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
}

static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_slot *slot,
			     int device_id, int abort_flag, int tag_to_abort)
{
	struct sas_task *task = slot->task;
	struct domain_device *dev = task->dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct hisi_sas_port *port = slot->port;

	/* dw0 */
	hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/
			       (port->id << CMD_HDR_PORT_OFF) |
			       (dev_is_sata(dev)
				<< CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
			       (abort_flag
				<< CMD_HDR_ABORT_FLAG_OFF));

	/* dw1 */
	hdr->dw1 = cpu_to_le32(device_id
			<< CMD_HDR_DEV_ID_OFF);

	/* dw7 */
	hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
	hdr->transfer_tags = cpu_to_le32(slot->idx);
}

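/*
 * Phy-up handling: read the negotiated port id and link rate, then capture
 * either the initial D2H FIS (SATA) or the received IDENTIFY address frame
 * (SAS/expander) before notifying libsas of the new phy.
 */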
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i;
	irqreturn_t res;
	u32 context, port_id, link_rate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;
	unsigned long flags;

	del_timer(&phy->timer);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);

	port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	port_id = (port_id >> (4 * phy_no)) & 0xf;
	link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
	link_rate = (link_rate >> (phy_no * 4)) & 0xf;

	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}
	sas_phy->linkrate = link_rate;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);

	/* Check for SATA dev */
	context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
	if (context & (1 << phy_no)) {
		struct hisi_sas_initial_fis *initial_fis;
		struct dev_to_host_fis *fis;
		u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
		struct Scsi_Host *shost = hisi_hba->shost;

		dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
		initial_fis = &hisi_hba->initial_fis[phy_no];
		fis = &initial_fis->fis;

		/* check ERR bit of Status Register */
		if (fis->status & ATA_ERR) {
			dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
				 phy_no, fis->status);
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
			res = IRQ_NONE;
			goto end;
		}

		sas_phy->oob_mode = SATA_OOB_MODE;
		attached_sas_addr[0] = 0x50;
		attached_sas_addr[6] = shost->host_no;
		attached_sas_addr[7] = phy_no;
		memcpy(sas_phy->attached_sas_addr,
		       attached_sas_addr,
		       SAS_ADDR_SIZE);
		memcpy(sas_phy->frame_rcvd, fis,
		       sizeof(struct dev_to_host_fis));
		phy->phy_type |= PORT_TYPE_SATA;
		phy->identify.device_type = SAS_SATA_DEV;
		phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
		phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	} else {
		u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
		struct sas_identify_frame *id =
			(struct sas_identify_frame *)frame_rcvd;

		dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
		for (i = 0; i < 6; i++) {
			u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
						       RX_IDAF_DWORD0 + (i * 4));
			frame_rcvd[i] = __swab32(idaf);
		}
		sas_phy->oob_mode = SAS_OOB_MODE;
		memcpy(sas_phy->attached_sas_addr,
		       &id->sas_addr,
		       SAS_ADDR_SIZE);
		phy->phy_type |= PORT_TYPE_SAS;
		phy->identify.device_type = id->dev_type;
		phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
		if (phy->identify.device_type == SAS_END_DEVICE)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SSP;
		else if (phy->identify.device_type != SAS_PHY_UNUSED)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SMP;
	}

	phy->port_id = port_id;
	phy->phy_attached = 1;
	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
	res = IRQ_HANDLED;
	spin_lock_irqsave(&phy->lock, flags);
	if (phy->reset_completion) {
		phy->in_reset = 0;
		complete(phy->reset_completion);
	}
	spin_unlock_irqrestore(&phy->lock, flags);
end:
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}

static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 phy_state, sl_ctrl, txid_auto;
	struct device *dev = hisi_hba->dev;

	atomic_inc(&phy->down_cnt);

	del_timer(&phy->timer);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);

	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
			     sl_ctrl&(~SL_CTA_MSK));

	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
			     txid_auto | CT3_MSK);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return IRQ_HANDLED;
}

static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	u32 bcast_status;

	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
	bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
	if ((bcast_status & RX_BCAST_CHG_MSK) &&
	    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_RX_BCST_ACK_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);

	return IRQ_HANDLED;
}

static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_NONE;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0x11111111;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);
			u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
			int rdy = phy_state & (1 << phy_no);

			if (rdy) {
				if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
					/* phy up */
					if (phy_up_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
				if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
					/* phy bcast */
					if (phy_bcast_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
			} else {
				if (irq_value & CHL_INT0_NOT_RDY_MSK)
					/* phy down */
					if (phy_down_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
			}
		}
		irq_msk >>= 4;
		phy_no++;
	}

	return res;
}

static const struct hisi_sas_hw_error port_axi_error[] = {
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF),
		.msg = "dmac_tx_ecc_bad_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF),
		.msg = "dmac_rx_ecc_bad_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
		.msg = "dma_tx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
		.msg = "dma_tx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
		.msg = "dma_rx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
		.msg = "dma_rx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF),
		.msg = "dma_tx_fifo_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF),
		.msg = "dma_rx_fifo_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF),
		.msg = "dma_tx_axi_ruser_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF),
		.msg = "dma_rx_axi_ruser_err",
	},
};

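/*
 * CHL_INT1 reports per-phy DMA-side failures (ECC, AXI, FIFO); any unmasked
 * error listed in port_axi_error above is logged and triggers a controller
 * reset via the rst_work work item.
 */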
1704 { 1705 u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1); 1706 u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK); 1707 struct device *dev = hisi_hba->dev; 1708 int i; 1709 1710 irq_value &= ~irq_msk; 1711 if (!irq_value) 1712 return; 1713 1714 for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) { 1715 const struct hisi_sas_hw_error *error = &port_axi_error[i]; 1716 1717 if (!(irq_value & error->irq_msk)) 1718 continue; 1719 1720 dev_err(dev, "%s error (phy%d 0x%x) found!\n", 1721 error->msg, phy_no, irq_value); 1722 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 1723 } 1724 1725 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value); 1726 } 1727 1728 static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 1729 { 1730 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1731 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1732 struct sas_phy *sphy = sas_phy->phy; 1733 unsigned long flags; 1734 u32 reg_value; 1735 1736 spin_lock_irqsave(&phy->lock, flags); 1737 1738 /* loss dword sync */ 1739 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST); 1740 sphy->loss_of_dword_sync_count += reg_value; 1741 1742 /* phy reset problem */ 1743 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB); 1744 sphy->phy_reset_problem_count += reg_value; 1745 1746 /* invalid dword */ 1747 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW); 1748 sphy->invalid_dword_count += reg_value; 1749 1750 /* disparity err */ 1751 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR); 1752 sphy->running_disparity_error_count += reg_value; 1753 1754 /* code violation error */ 1755 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR); 1756 phy->code_violation_err_count += reg_value; 1757 1758 spin_unlock_irqrestore(&phy->lock, flags); 1759 } 1760 1761 static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 1762 { 1763 u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); 1764 u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); 1765 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1766 struct pci_dev *pci_dev = hisi_hba->pci_dev; 1767 struct device *dev = hisi_hba->dev; 1768 static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) | 1769 BIT(CHL_INT2_RX_CODE_ERR_OFF) | 1770 BIT(CHL_INT2_RX_INVLD_DW_OFF); 1771 1772 irq_value &= ~irq_msk; 1773 if (!irq_value) 1774 return; 1775 1776 if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) { 1777 dev_warn(dev, "phy%d identify timeout\n", phy_no); 1778 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 1779 } 1780 1781 if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) { 1782 u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, 1783 STP_LINK_TIMEOUT_STATE); 1784 1785 dev_warn(dev, "phy%d stp link timeout (0x%x)\n", 1786 phy_no, reg_value); 1787 if (reg_value & BIT(4)) 1788 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 1789 } 1790 1791 if (pci_dev->revision > 0x20 && (irq_value & msk)) { 1792 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1793 struct sas_phy *sphy = sas_phy->phy; 1794 1795 phy_get_events_v3_hw(hisi_hba, phy_no); 1796 1797 if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) 1798 dev_info(dev, "phy%d invalid dword cnt: %u\n", phy_no, 1799 sphy->invalid_dword_count); 1800 1801 if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF)) 1802 dev_info(dev, "phy%d code violation cnt: %u\n", phy_no, 1803 phy->code_violation_err_count); 1804 1805 if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF)) 1806 dev_info(dev, "phy%d 
disparity error cnt: %u\n", phy_no, 1807 sphy->running_disparity_error_count); 1808 } 1809 1810 if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) && 1811 (pci_dev->revision == 0x20)) { 1812 u32 reg_value; 1813 int rc; 1814 1815 rc = hisi_sas_read32_poll_timeout_atomic( 1816 HILINK_ERR_DFX, reg_value, 1817 !((reg_value >> 8) & BIT(phy_no)), 1818 1000, 10000); 1819 if (rc) 1820 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 1821 } 1822 1823 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value); 1824 } 1825 1826 static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 1827 { 1828 u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); 1829 1830 if (irq_value0 & CHL_INT0_PHY_RDY_MSK) 1831 hisi_sas_phy_oob_ready(hisi_hba, phy_no); 1832 1833 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 1834 irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK) 1835 & (~CHL_INT0_SL_PHY_ENABLE_MSK) 1836 & (~CHL_INT0_NOT_RDY_MSK)); 1837 } 1838 1839 static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) 1840 { 1841 struct hisi_hba *hisi_hba = p; 1842 u32 irq_msk; 1843 int phy_no = 0; 1844 1845 irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) 1846 & CHNL_INT_STS_MSK; 1847 1848 while (irq_msk) { 1849 if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH))) 1850 handle_chl_int0_v3_hw(hisi_hba, phy_no); 1851 1852 if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH))) 1853 handle_chl_int1_v3_hw(hisi_hba, phy_no); 1854 1855 if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH))) 1856 handle_chl_int2_v3_hw(hisi_hba, phy_no); 1857 1858 irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH)); 1859 phy_no++; 1860 } 1861 1862 return IRQ_HANDLED; 1863 } 1864 1865 static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = { 1866 { 1867 .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF), 1868 .msk = HGC_DQE_ECC_MB_ADDR_MSK, 1869 .shift = HGC_DQE_ECC_MB_ADDR_OFF, 1870 .msg = "hgc_dqe_eccbad_intr", 1871 .reg = HGC_DQE_ECC_ADDR, 1872 }, 1873 { 1874 .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF), 1875 .msk = HGC_IOST_ECC_MB_ADDR_MSK, 1876 .shift = HGC_IOST_ECC_MB_ADDR_OFF, 1877 .msg = "hgc_iost_eccbad_intr", 1878 .reg = HGC_IOST_ECC_ADDR, 1879 }, 1880 { 1881 .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF), 1882 .msk = HGC_ITCT_ECC_MB_ADDR_MSK, 1883 .shift = HGC_ITCT_ECC_MB_ADDR_OFF, 1884 .msg = "hgc_itct_eccbad_intr", 1885 .reg = HGC_ITCT_ECC_ADDR, 1886 }, 1887 { 1888 .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF), 1889 .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, 1890 .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, 1891 .msg = "hgc_iostl_eccbad_intr", 1892 .reg = HGC_LM_DFX_STATUS2, 1893 }, 1894 { 1895 .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF), 1896 .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, 1897 .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, 1898 .msg = "hgc_itctl_eccbad_intr", 1899 .reg = HGC_LM_DFX_STATUS2, 1900 }, 1901 { 1902 .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF), 1903 .msk = HGC_CQE_ECC_MB_ADDR_MSK, 1904 .shift = HGC_CQE_ECC_MB_ADDR_OFF, 1905 .msg = "hgc_cqe_eccbad_intr", 1906 .reg = HGC_CQE_ECC_ADDR, 1907 }, 1908 { 1909 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF), 1910 .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, 1911 .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, 1912 .msg = "rxm_mem0_eccbad_intr", 1913 .reg = HGC_RXM_DFX_STATUS14, 1914 }, 1915 { 1916 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF), 1917 .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, 1918 .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, 1919 .msg = "rxm_mem1_eccbad_intr", 1920 .reg = HGC_RXM_DFX_STATUS14, 1921 }, 1922 { 1923 
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF), 1924 .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, 1925 .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, 1926 .msg = "rxm_mem2_eccbad_intr", 1927 .reg = HGC_RXM_DFX_STATUS14, 1928 }, 1929 { 1930 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF), 1931 .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, 1932 .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, 1933 .msg = "rxm_mem3_eccbad_intr", 1934 .reg = HGC_RXM_DFX_STATUS15, 1935 }, 1936 { 1937 .irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF), 1938 .msk = AM_ROB_ECC_ERR_ADDR_MSK, 1939 .shift = AM_ROB_ECC_ERR_ADDR_OFF, 1940 .msg = "ooo_ram_eccbad_intr", 1941 .reg = AM_ROB_ECC_ERR_ADDR, 1942 }, 1943 }; 1944 1945 static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba, 1946 u32 irq_value) 1947 { 1948 struct device *dev = hisi_hba->dev; 1949 const struct hisi_sas_hw_error *ecc_error; 1950 u32 val; 1951 int i; 1952 1953 for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) { 1954 ecc_error = &multi_bit_ecc_errors[i]; 1955 if (irq_value & ecc_error->irq_msk) { 1956 val = hisi_sas_read32(hisi_hba, ecc_error->reg); 1957 val &= ecc_error->msk; 1958 val >>= ecc_error->shift; 1959 dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n", 1960 ecc_error->msg, irq_value, val); 1961 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 1962 } 1963 } 1964 } 1965 1966 static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba) 1967 { 1968 u32 irq_value, irq_msk; 1969 1970 irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); 1971 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); 1972 1973 irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); 1974 if (irq_value) 1975 multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value); 1976 1977 hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value); 1978 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk); 1979 } 1980 1981 static const struct hisi_sas_hw_error axi_error[] = { 1982 { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" }, 1983 { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" }, 1984 { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" }, 1985 { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" }, 1986 { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" }, 1987 { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" }, 1988 { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" }, 1989 { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" }, 1990 {} 1991 }; 1992 1993 static const struct hisi_sas_hw_error fifo_error[] = { 1994 { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" }, 1995 { .msk = BIT(9), .msg = "CQE_MSG_FIFIO" }, 1996 { .msk = BIT(10), .msg = "GETDQE_FIFO" }, 1997 { .msk = BIT(11), .msg = "CMDP_FIFO" }, 1998 { .msk = BIT(12), .msg = "AWTCTRL_FIFO" }, 1999 {} 2000 }; 2001 2002 static const struct hisi_sas_hw_error fatal_axi_error[] = { 2003 { 2004 .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF), 2005 .msg = "write pointer and depth", 2006 }, 2007 { 2008 .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF), 2009 .msg = "iptt no match slot", 2010 }, 2011 { 2012 .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF), 2013 .msg = "read pointer and depth", 2014 }, 2015 { 2016 .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF), 2017 .reg = HGC_AXI_FIFO_ERR_INFO, 2018 .sub = axi_error, 2019 }, 2020 { 2021 .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF), 2022 .reg = HGC_AXI_FIFO_ERR_INFO, 2023 .sub = fifo_error, 2024 }, 2025 { 2026 .irq_msk = BIT(ENT_INT_SRC3_LM_OFF), 2027 .msg = "LM add/fetch list", 2028 }, 2029 { 2030 .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF), 2031 .msg = "SAS_HGC_ABT fetch LM list", 2032 }, 2033 { 2034 .irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF), 2035 .msg = "read dqe poison", 2036 }, 2037 { 2038 .irq_msk = 
BIT(ENT_INT_SRC3_IOST_POISON_OFF), 2039 .msg = "read iost poison", 2040 }, 2041 { 2042 .irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF), 2043 .msg = "read itct poison", 2044 }, 2045 { 2046 .irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF), 2047 .msg = "read itct ncq poison", 2048 }, 2049 2050 }; 2051 2052 static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) 2053 { 2054 u32 irq_value, irq_msk; 2055 struct hisi_hba *hisi_hba = p; 2056 struct device *dev = hisi_hba->dev; 2057 struct pci_dev *pdev = hisi_hba->pci_dev; 2058 int i; 2059 2060 irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); 2061 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00); 2062 2063 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 2064 irq_value &= ~irq_msk; 2065 2066 for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) { 2067 const struct hisi_sas_hw_error *error = &fatal_axi_error[i]; 2068 2069 if (!(irq_value & error->irq_msk)) 2070 continue; 2071 2072 if (error->sub) { 2073 const struct hisi_sas_hw_error *sub = error->sub; 2074 u32 err_value = hisi_sas_read32(hisi_hba, error->reg); 2075 2076 for (; sub->msk || sub->msg; sub++) { 2077 if (!(err_value & sub->msk)) 2078 continue; 2079 2080 dev_err(dev, "%s error (0x%x) found!\n", 2081 sub->msg, irq_value); 2082 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2083 } 2084 } else { 2085 dev_err(dev, "%s error (0x%x) found!\n", 2086 error->msg, irq_value); 2087 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2088 } 2089 2090 if (pdev->revision < 0x21) { 2091 u32 reg_val; 2092 2093 reg_val = hisi_sas_read32(hisi_hba, 2094 AXI_MASTER_CFG_BASE + 2095 AM_CTRL_GLOBAL); 2096 reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; 2097 hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + 2098 AM_CTRL_GLOBAL, reg_val); 2099 } 2100 } 2101 2102 fatal_ecc_int_v3_hw(hisi_hba); 2103 2104 if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) { 2105 u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); 2106 u32 dev_id = reg_val & ITCT_DEV_MSK; 2107 struct hisi_sas_device *sas_dev = 2108 &hisi_hba->devices[dev_id]; 2109 2110 hisi_sas_write32(hisi_hba, ITCT_CLR, 0); 2111 dev_dbg(dev, "clear ITCT ok\n"); 2112 complete(sas_dev->completion); 2113 } 2114 2115 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00); 2116 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk); 2117 2118 return IRQ_HANDLED; 2119 } 2120 2121 static void 2122 slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, 2123 struct hisi_sas_slot *slot) 2124 { 2125 struct task_status_struct *ts = &task->task_status; 2126 struct hisi_sas_complete_v3_hdr *complete_queue = 2127 hisi_hba->complete_hdr[slot->cmplt_queue]; 2128 struct hisi_sas_complete_v3_hdr *complete_hdr = 2129 &complete_queue[slot->cmplt_queue_slot]; 2130 struct hisi_sas_err_record_v3 *record = 2131 hisi_sas_status_buf_addr_mem(slot); 2132 u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type); 2133 u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type); 2134 u32 dw3 = le32_to_cpu(complete_hdr->dw3); 2135 2136 switch (task->task_proto) { 2137 case SAS_PROTOCOL_SSP: 2138 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { 2139 ts->residual = trans_tx_fail_type; 2140 ts->stat = SAS_DATA_UNDERRUN; 2141 } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { 2142 ts->stat = SAS_QUEUE_FULL; 2143 slot->abort = 1; 2144 } else { 2145 ts->stat = SAS_OPEN_REJECT; 2146 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2147 } 2148 break; 2149 case SAS_PROTOCOL_SATA: 2150 case SAS_PROTOCOL_STP: 2151 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 2152 if (dma_rx_err_type & 
RX_DATA_LEN_UNDERFLOW_MSK) { 2153 ts->residual = trans_tx_fail_type; 2154 ts->stat = SAS_DATA_UNDERRUN; 2155 } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { 2156 ts->stat = SAS_PHY_DOWN; 2157 slot->abort = 1; 2158 } else { 2159 ts->stat = SAS_OPEN_REJECT; 2160 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2161 } 2162 hisi_sas_sata_done(task, slot); 2163 break; 2164 case SAS_PROTOCOL_SMP: 2165 ts->stat = SAM_STAT_CHECK_CONDITION; 2166 break; 2167 default: 2168 break; 2169 } 2170 } 2171 2172 static void slot_complete_v3_hw(struct hisi_hba *hisi_hba, 2173 struct hisi_sas_slot *slot) 2174 { 2175 struct sas_task *task = slot->task; 2176 struct hisi_sas_device *sas_dev; 2177 struct device *dev = hisi_hba->dev; 2178 struct task_status_struct *ts; 2179 struct domain_device *device; 2180 struct sas_ha_struct *ha; 2181 struct hisi_sas_complete_v3_hdr *complete_queue = 2182 hisi_hba->complete_hdr[slot->cmplt_queue]; 2183 struct hisi_sas_complete_v3_hdr *complete_hdr = 2184 &complete_queue[slot->cmplt_queue_slot]; 2185 unsigned long flags; 2186 bool is_internal = slot->is_internal; 2187 u32 dw0, dw1, dw3; 2188 2189 if (unlikely(!task || !task->lldd_task || !task->dev)) 2190 return; 2191 2192 ts = &task->task_status; 2193 device = task->dev; 2194 ha = device->port->ha; 2195 sas_dev = device->lldd_dev; 2196 2197 spin_lock_irqsave(&task->task_state_lock, flags); 2198 task->task_state_flags &= 2199 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 2200 spin_unlock_irqrestore(&task->task_state_lock, flags); 2201 2202 memset(ts, 0, sizeof(*ts)); 2203 ts->resp = SAS_TASK_COMPLETE; 2204 2205 if (unlikely(!sas_dev)) { 2206 dev_dbg(dev, "slot complete: port has not device\n"); 2207 ts->stat = SAS_PHY_DOWN; 2208 goto out; 2209 } 2210 2211 dw0 = le32_to_cpu(complete_hdr->dw0); 2212 dw1 = le32_to_cpu(complete_hdr->dw1); 2213 dw3 = le32_to_cpu(complete_hdr->dw3); 2214 2215 /* 2216 * Use SAS+TMF status codes 2217 */ 2218 switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) { 2219 case STAT_IO_ABORTED: 2220 /* this IO has been aborted by abort command */ 2221 ts->stat = SAS_ABORTED_TASK; 2222 goto out; 2223 case STAT_IO_COMPLETE: 2224 /* internal abort command complete */ 2225 ts->stat = TMF_RESP_FUNC_SUCC; 2226 goto out; 2227 case STAT_IO_NO_DEVICE: 2228 ts->stat = TMF_RESP_FUNC_COMPLETE; 2229 goto out; 2230 case STAT_IO_NOT_VALID: 2231 /* 2232 * abort single IO, the controller can't find the IO 2233 */ 2234 ts->stat = TMF_RESP_FUNC_FAILED; 2235 goto out; 2236 default: 2237 break; 2238 } 2239 2240 /* check for erroneous completion */ 2241 if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) { 2242 u32 *error_info = hisi_sas_status_buf_addr_mem(slot); 2243 2244 slot_err_v3_hw(hisi_hba, task, slot); 2245 if (ts->stat != SAS_DATA_UNDERRUN) 2246 dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", 2247 slot->idx, task, sas_dev->device_id, 2248 dw0, dw1, complete_hdr->act, dw3, 2249 error_info[0], error_info[1], 2250 error_info[2], error_info[3]); 2251 if (unlikely(slot->abort)) { 2252 sas_task_abort(task); 2253 return; 2254 } 2255 goto out; 2256 } 2257 2258 switch (task->task_proto) { 2259 case SAS_PROTOCOL_SSP: { 2260 struct ssp_response_iu *iu = 2261 hisi_sas_status_buf_addr_mem(slot) + 2262 sizeof(struct hisi_sas_err_record); 2263 2264 sas_ssp_task_response(dev, task, iu); 2265 break; 2266 } 2267 case SAS_PROTOCOL_SMP: { 2268 struct scatterlist *sg_resp = &task->smp_task.smp_resp; 2269 void *to = page_address(sg_page(sg_resp)); 2270 2271 
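		/*
		 * SMP response: the frame was DMA'd into the slot's status
		 * buffer; unmap the request scatterlist and copy the reply
		 * out to the caller's response scatterlist below.
		 */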
		ts->stat = SAM_STAT_GOOD;

		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		memcpy(to + sg_resp->offset,
		       hisi_sas_status_buf_addr_mem(slot) +
		       sizeof(struct hisi_sas_err_record),
		       sg_resp->length);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		ts->stat = SAM_STAT_GOOD;
		hisi_sas_sata_done(task, slot);
		break;
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_warn(dev, "slot complete: port %d has removed\n",
			 slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		dev_info(dev, "slot complete: task(%pK) aborted\n", task);
		return;
	}
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot);

	if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
		spin_lock_irqsave(&device->done_lock, flags);
		if (test_bit(SAS_HA_FROZEN, &ha->state)) {
			spin_unlock_irqrestore(&device->done_lock, flags);
			dev_info(dev, "slot complete: task(%pK) ignored\n ",
				 task);
			return;
		}
		spin_unlock_irqrestore(&device->done_lock, flags);
	}

	if (task->task_done)
		task->task_done(task);
}

static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_complete_v3_hdr *complete_queue;
	u32 rd_point = cq->rd_point, wr_point;
	int queue = cq->id;

	complete_queue = hisi_hba->complete_hdr[queue];

	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v3_hdr *complete_hdr;
		struct device *dev = hisi_hba->dev;
		u32 dw1;
		int iptt;

		complete_hdr = &complete_queue[rd_point];
		dw1 = le32_to_cpu(complete_hdr->dw1);

		iptt = dw1 & CMPLT_HDR_IPTT_MSK;
		if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v3_hw(hisi_hba, slot);
		} else
			dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);

		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);

	return IRQ_HANDLED;
}

static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	int queue = cq->id;

	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);

	return IRQ_WAKE_THREAD;
}

static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
{
	int vectors;
	int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
	struct Scsi_Host *shost = hisi_hba->shost;
	struct irq_affinity desc = {
		.pre_vectors = BASE_VECTORS_V3_HW,
	};

	min_msi = MIN_AFFINE_VECTORS_V3_HW;
	vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
						 min_msi, max_msi,
						 PCI_IRQ_MSI |
						 PCI_IRQ_AFFINITY,
						 &desc);
	if (vectors < 0)
		return -ENOENT;

	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
	shost->nr_hw_queues = hisi_hba->cq_nvecs;

	return 0;
}

static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int rc, i;

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
			      int_phy_up_down_bcast_v3_hw, 0,
			      DRV_NAME " phy", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
		return -ENOENT;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
			      int_chnl_int_v3_hw, 0,
			      DRV_NAME " channel", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
		return -ENOENT;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
			      fatal_axi_int_v3_hw, 0,
			      DRV_NAME " fatal", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
		return -ENOENT;
	}

	if (hisi_sas_intr_conv)
		dev_info(dev, "Enable interrupt converge\n");

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
		unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED :
							      IRQF_ONESHOT;

		cq->irq_no = pci_irq_vector(pdev, nr);
		rc = devm_request_threaded_irq(dev, cq->irq_no,
					       cq_interrupt_v3_hw,
					       cq_thread_v3_hw,
					       irqflags,
					       DRV_NAME " cq", cq);
		if (rc) {
			dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
				i, rc);
			return -ENOENT;
		}
		cq->irq_mask = pci_irq_get_affinity(pdev, i + BASE_VECTORS_V3_HW);
		if (!cq->irq_mask) {
			dev_err(dev, "could not get cq%d irq affinity!\n", i);
			return -ENOENT;
		}
	}

	return 0;
}

static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	rc = interrupt_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	return 0;
}

static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
		struct sas_phy_linkrates *r)
{
	enum sas_linkrate max = r->maximum_linkrate;
	u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, phy_no,
						     PROG_PHY_LINK_RATE);

	prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
	prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
			     prog_phy_link_rate);
}

static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
{
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	synchronize_irq(pci_irq_vector(pdev, 1));
	synchronize_irq(pci_irq_vector(pdev, 2));
	synchronize_irq(pci_irq_vector(pdev, 11));
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);

	for (i = 0; i < hisi_hba->cq_nvecs; i++)
		synchronize_irq(pci_irq_vector(pdev, i + 16));

	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	for (i = 0; i <
hisi_hba->n_phy; i++) { 2511 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); 2512 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff); 2513 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1); 2514 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1); 2515 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1); 2516 } 2517 } 2518 2519 static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba) 2520 { 2521 return hisi_sas_read32(hisi_hba, PHY_STATE); 2522 } 2523 2524 static int disable_host_v3_hw(struct hisi_hba *hisi_hba) 2525 { 2526 struct device *dev = hisi_hba->dev; 2527 u32 status, reg_val; 2528 int rc; 2529 2530 interrupt_disable_v3_hw(hisi_hba); 2531 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); 2532 2533 hisi_sas_stop_phys(hisi_hba); 2534 2535 mdelay(10); 2536 2537 reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + 2538 AM_CTRL_GLOBAL); 2539 reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; 2540 hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + 2541 AM_CTRL_GLOBAL, reg_val); 2542 2543 /* wait until bus idle */ 2544 rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE + 2545 AM_CURR_TRANS_RETURN, status, 2546 status == 0x3, 10, 100); 2547 if (rc) { 2548 dev_err(dev, "axi bus is not idle, rc=%d\n", rc); 2549 return rc; 2550 } 2551 2552 return 0; 2553 } 2554 2555 static int soft_reset_v3_hw(struct hisi_hba *hisi_hba) 2556 { 2557 struct device *dev = hisi_hba->dev; 2558 int rc; 2559 2560 rc = disable_host_v3_hw(hisi_hba); 2561 if (rc) { 2562 dev_err(dev, "soft reset: disable host failed rc=%d\n", rc); 2563 return rc; 2564 } 2565 2566 hisi_sas_init_mem(hisi_hba); 2567 2568 return hw_init_v3_hw(hisi_hba); 2569 } 2570 2571 static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type, 2572 u8 reg_index, u8 reg_count, u8 *write_data) 2573 { 2574 struct device *dev = hisi_hba->dev; 2575 u32 *data = (u32 *)write_data; 2576 int i; 2577 2578 switch (reg_type) { 2579 case SAS_GPIO_REG_TX: 2580 if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) { 2581 dev_err(dev, "write gpio: invalid reg range[%d, %d]\n", 2582 reg_index, reg_index + reg_count - 1); 2583 return -EINVAL; 2584 } 2585 2586 for (i = 0; i < reg_count; i++) 2587 hisi_sas_write32(hisi_hba, 2588 SAS_GPIO_TX_0_1 + (reg_index + i) * 4, 2589 data[i]); 2590 break; 2591 default: 2592 dev_err(dev, "write gpio: unsupported or bad reg type %d\n", 2593 reg_type); 2594 return -EINVAL; 2595 } 2596 2597 return 0; 2598 } 2599 2600 static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, 2601 int delay_ms, int timeout_ms) 2602 { 2603 struct device *dev = hisi_hba->dev; 2604 int entries, entries_old = 0, time; 2605 2606 for (time = 0; time < timeout_ms; time += delay_ms) { 2607 entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT); 2608 if (entries == entries_old) 2609 break; 2610 2611 entries_old = entries; 2612 msleep(delay_ms); 2613 } 2614 2615 if (time >= timeout_ms) { 2616 dev_dbg(dev, "Wait commands complete timeout!\n"); 2617 return; 2618 } 2619 2620 dev_dbg(dev, "wait commands complete %dms\n", time); 2621 } 2622 2623 static ssize_t intr_conv_v3_hw_show(struct device *dev, 2624 struct device_attribute *attr, char *buf) 2625 { 2626 return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv); 2627 } 2628 static DEVICE_ATTR_RO(intr_conv_v3_hw); 2629 2630 static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba) 2631 { 2632 /* config those registers between enable and disable PHYs */ 2633 hisi_sas_stop_phys(hisi_hba); 2634 2635 if (hisi_hba->intr_coal_ticks == 0 || 2636 
hisi_hba->intr_coal_count == 0) { 2637 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); 2638 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); 2639 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); 2640 } else { 2641 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); 2642 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 2643 hisi_hba->intr_coal_ticks); 2644 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 2645 hisi_hba->intr_coal_count); 2646 } 2647 phys_init_v3_hw(hisi_hba); 2648 } 2649 2650 static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev, 2651 struct device_attribute *attr, 2652 char *buf) 2653 { 2654 struct Scsi_Host *shost = class_to_shost(dev); 2655 struct hisi_hba *hisi_hba = shost_priv(shost); 2656 2657 return scnprintf(buf, PAGE_SIZE, "%u\n", 2658 hisi_hba->intr_coal_ticks); 2659 } 2660 2661 static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev, 2662 struct device_attribute *attr, 2663 const char *buf, size_t count) 2664 { 2665 struct Scsi_Host *shost = class_to_shost(dev); 2666 struct hisi_hba *hisi_hba = shost_priv(shost); 2667 u32 intr_coal_ticks; 2668 int ret; 2669 2670 ret = kstrtou32(buf, 10, &intr_coal_ticks); 2671 if (ret) { 2672 dev_err(dev, "Input data of interrupt coalesce unmatch\n"); 2673 return -EINVAL; 2674 } 2675 2676 if (intr_coal_ticks >= BIT(24)) { 2677 dev_err(dev, "intr_coal_ticks must be less than 2^24!\n"); 2678 return -EINVAL; 2679 } 2680 2681 hisi_hba->intr_coal_ticks = intr_coal_ticks; 2682 2683 config_intr_coal_v3_hw(hisi_hba); 2684 2685 return count; 2686 } 2687 static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw); 2688 2689 static ssize_t intr_coal_count_v3_hw_show(struct device *dev, 2690 struct device_attribute 2691 *attr, char *buf) 2692 { 2693 struct Scsi_Host *shost = class_to_shost(dev); 2694 struct hisi_hba *hisi_hba = shost_priv(shost); 2695 2696 return scnprintf(buf, PAGE_SIZE, "%u\n", 2697 hisi_hba->intr_coal_count); 2698 } 2699 2700 static ssize_t intr_coal_count_v3_hw_store(struct device *dev, 2701 struct device_attribute 2702 *attr, const char *buf, size_t count) 2703 { 2704 struct Scsi_Host *shost = class_to_shost(dev); 2705 struct hisi_hba *hisi_hba = shost_priv(shost); 2706 u32 intr_coal_count; 2707 int ret; 2708 2709 ret = kstrtou32(buf, 10, &intr_coal_count); 2710 if (ret) { 2711 dev_err(dev, "Input data of interrupt coalesce unmatch\n"); 2712 return -EINVAL; 2713 } 2714 2715 if (intr_coal_count >= BIT(8)) { 2716 dev_err(dev, "intr_coal_count must be less than 2^8!\n"); 2717 return -EINVAL; 2718 } 2719 2720 hisi_hba->intr_coal_count = intr_coal_count; 2721 2722 config_intr_coal_v3_hw(hisi_hba); 2723 2724 return count; 2725 } 2726 static DEVICE_ATTR_RW(intr_coal_count_v3_hw); 2727 2728 static int slave_configure_v3_hw(struct scsi_device *sdev) 2729 { 2730 struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev); 2731 struct domain_device *ddev = sdev_to_domain_dev(sdev); 2732 struct hisi_hba *hisi_hba = shost_priv(shost); 2733 struct device *dev = hisi_hba->dev; 2734 int ret = sas_slave_configure(sdev); 2735 2736 if (ret) 2737 return ret; 2738 if (!dev_is_sata(ddev)) 2739 sas_change_queue_depth(sdev, 64); 2740 2741 if (sdev->type == TYPE_ENCLOSURE) 2742 return 0; 2743 2744 if (!device_link_add(&sdev->sdev_gendev, dev, 2745 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)) { 2746 if (pm_runtime_enabled(dev)) { 2747 dev_info(dev, "add device link failed, disable runtime PM for the host\n"); 2748 pm_runtime_disable(dev); 2749 } 2750 } 2751 2752 return 0; 2753 } 2754 2755 static struct device_attribute *host_attrs_v3_hw[] = { 2756 
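	/*
	 * sysfs attributes exported per host: the common phy event
	 * threshold plus the v3-specific interrupt converge/coalescing
	 * controls defined above.
	 */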
	&dev_attr_phy_event_threshold,
	&dev_attr_intr_conv_v3_hw,
	&dev_attr_intr_coal_ticks_v3_hw,
	&dev_attr_intr_coal_count_v3_hw,
	NULL
};

#define HISI_SAS_DEBUGFS_REG(x) {#x, x}

struct hisi_sas_debugfs_reg_lu {
	char *name;
	int off;
};

struct hisi_sas_debugfs_reg {
	const struct hisi_sas_debugfs_reg_lu *lu;
	int count;
	int base_off;
};

static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(PHY_CFG),
	HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE),
	HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE),
	HISI_SAS_DEBUGFS_REG(PHY_CTRL),
	HISI_SAS_DEBUGFS_REG(SL_CFG),
	HISI_SAS_DEBUGFS_REG(AIP_LIMIT),
	HISI_SAS_DEBUGFS_REG(SL_CONTROL),
	HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6),
	HISI_SAS_DEBUGFS_REG(TXID_AUTO),
	HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0),
	HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H),
	HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER),
	HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE),
	HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER),
	HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(CHL_INT0),
	HISI_SAS_DEBUGFS_REG(CHL_INT1),
	HISI_SAS_DEBUGFS_REG(CHL_INT2),
	HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK),
	HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK),
	HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK),
	HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN),
	HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER),
	HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK),
	HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK),
	HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS),
	HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS),
	HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
	.lu = debugfs_port_reg_lu,
	.count = 0x100,
	.base_off = PORT_BASE,
};

static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE),
	HISI_SAS_DEBUGFS_REG(PHY_CONTEXT),
	HISI_SAS_DEBUGFS_REG(PHY_STATE),
	HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA),
	HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE),
	HISI_SAS_DEBUGFS_REG(ITCT_CLR),
	HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO),
	HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI),
	HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO),
	HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI),
	HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG),
	HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL),
	HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL),
	HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME),
	HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE),
	HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME),
	HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME),
	HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN),
	HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME),
	HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2),
	HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT),
	HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE),
	HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS),
	HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS),
	HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO),
	HISI_SAS_DEBUGFS_REG(INT_COAL_EN),
	HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT),
	HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT),
	HISI_SAS_DEBUGFS_REG(OQ_INT_SRC),
	HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3),
	HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK),
	HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK),
	HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK),
	HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR),
	HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK),
	HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN),
	HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR),
	HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG),
	HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR),
	HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG),
	HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG),
	HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1),
	HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
	.lu = debugfs_global_reg_lu,
	.count = 0x800,
};

static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS),
	HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS),
	HISI_SAS_DEBUGFS_REG(AXI_CFG),
	HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
	.lu = debugfs_axi_reg_lu,
	.count = 0x61,
	.base_off = AXI_MASTER_CFG_BASE,
};

static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
	HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2),
	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK),
	{}
};

static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
	.lu = debugfs_ras_reg_lu,
	.count = 0x10,
	.base_off = RAS_BASE,
};

static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
{
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
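	/*
	 * With new commands rejected and the delivery queues disabled,
	 * give outstanding commands time to drain and quiesce the IRQs
	 * before the register/queue snapshot is taken.
	 */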
2942 wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000); 2943 2944 hisi_sas_sync_irqs(hisi_hba); 2945 } 2946 2947 static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) 2948 { 2949 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 2950 (u32)((1ULL << hisi_hba->queue_count) - 1)); 2951 2952 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 2953 } 2954 2955 static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba, 2956 enum hisi_sas_debugfs_cache_type type, 2957 u32 *cache) 2958 { 2959 u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 2960 HISI_SAS_IOST_ITCT_CACHE_NUM; 2961 struct device *dev = hisi_hba->dev; 2962 u32 *buf = cache; 2963 u32 i, val; 2964 2965 hisi_sas_write32(hisi_hba, TAB_RD_TYPE, type); 2966 2967 for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_DW_SZ; i++) { 2968 val = hisi_sas_read32(hisi_hba, TAB_DFX); 2969 if (val == 0xffffffff) 2970 break; 2971 } 2972 2973 if (val != 0xffffffff) { 2974 dev_err(dev, "Issue occurred in reading IOST/ITCT cache!\n"); 2975 return; 2976 } 2977 2978 memset(buf, 0, cache_dw_size * 4); 2979 buf[0] = val; 2980 2981 for (i = 1; i < cache_dw_size; i++) 2982 buf[i] = hisi_sas_read32(hisi_hba, TAB_DFX); 2983 } 2984 2985 static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba) 2986 { 2987 u32 reg_val; 2988 int phy_no = hisi_hba->debugfs_bist_phy_no; 2989 int i; 2990 2991 /* disable PHY */ 2992 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 2993 2994 /* update FFE */ 2995 for (i = 0; i < FFE_CFG_MAX; i++) 2996 hisi_sas_phy_write32(hisi_hba, phy_no, TXDEEMPH_G1 + (i * 0x4), 2997 hisi_hba->debugfs_bist_ffe[phy_no][i]); 2998 2999 /* disable ALOS */ 3000 reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG); 3001 reg_val |= CFG_ALOS_CHK_DISABLE_MSK; 3002 hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val); 3003 } 3004 3005 static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba) 3006 { 3007 u32 reg_val; 3008 int phy_no = hisi_hba->debugfs_bist_phy_no; 3009 3010 /* disable loopback */ 3011 reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL); 3012 reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK | 3013 CFG_BIST_TEST_MSK); 3014 hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val); 3015 3016 /* enable ALOS */ 3017 reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG); 3018 reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK; 3019 hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val); 3020 3021 /* restore the linkrate */ 3022 reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); 3023 /* init OOB link rate as 1.5 Gbits */ 3024 reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK; 3025 reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF); 3026 hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val); 3027 3028 /* enable PHY */ 3029 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 3030 } 3031 3032 #define SAS_PHY_BIST_CODE_INIT 0x1 3033 #define SAS_PHY_BIST_CODE1_INIT 0X80 3034 static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) 3035 { 3036 u32 reg_val, mode_tmp; 3037 u32 linkrate = hisi_hba->debugfs_bist_linkrate; 3038 u32 phy_no = hisi_hba->debugfs_bist_phy_no; 3039 u32 *ffe = hisi_hba->debugfs_bist_ffe[phy_no]; 3040 u32 code_mode = hisi_hba->debugfs_bist_code_mode; 3041 u32 path_mode = hisi_hba->debugfs_bist_mode; 3042 u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0]; 3043 struct device *dev = hisi_hba->dev; 3044 3045 dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} 
fixed_code={0x%x, 0x%x}\n", 3046 phy_no, linkrate, code_mode, path_mode, 3047 ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS], 3048 ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS], 3049 ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS], 3050 ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE], 3051 fix_code[FIXED_CODE_1]); 3052 mode_tmp = path_mode ? 2 : 1; 3053 if (enable) { 3054 /* some preparations before bist test */ 3055 hisi_sas_bist_test_prep_v3_hw(hisi_hba); 3056 3057 /* set linkrate of bit test*/ 3058 reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, 3059 PROG_PHY_LINK_RATE); 3060 reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK; 3061 reg_val |= (linkrate << CFG_PROG_OOB_PHY_LINK_RATE_OFF); 3062 hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, 3063 reg_val); 3064 3065 /* set code mode of bit test */ 3066 reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, 3067 SAS_PHY_BIST_CTRL); 3068 reg_val &= ~(CFG_BIST_MODE_SEL_MSK | CFG_LOOP_TEST_MODE_MSK | 3069 CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK | 3070 CFG_BIST_TEST_MSK); 3071 reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) | 3072 (mode_tmp << CFG_LOOP_TEST_MODE_OFF) | 3073 CFG_BIST_TEST_MSK); 3074 hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, 3075 reg_val); 3076 3077 /* set the bist init value */ 3078 if (code_mode == HISI_SAS_BIST_CODE_MODE_FIXED_DATA) { 3079 reg_val = hisi_hba->debugfs_bist_fixed_code[0]; 3080 hisi_sas_phy_write32(hisi_hba, phy_no, 3081 SAS_PHY_BIST_CODE, reg_val); 3082 3083 reg_val = hisi_hba->debugfs_bist_fixed_code[1]; 3084 hisi_sas_phy_write32(hisi_hba, phy_no, 3085 SAS_PHY_BIST_CODE1, reg_val); 3086 } else { 3087 hisi_sas_phy_write32(hisi_hba, phy_no, 3088 SAS_PHY_BIST_CODE, 3089 SAS_PHY_BIST_CODE_INIT); 3090 hisi_sas_phy_write32(hisi_hba, phy_no, 3091 SAS_PHY_BIST_CODE1, 3092 SAS_PHY_BIST_CODE1_INIT); 3093 } 3094 3095 mdelay(100); 3096 reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK); 3097 hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, 3098 reg_val); 3099 3100 /* clear error bit */ 3101 mdelay(100); 3102 hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT); 3103 } else { 3104 /* disable bist test and recover it */ 3105 hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba, 3106 phy_no, SAS_BIST_ERR_CNT); 3107 hisi_sas_bist_test_restore_v3_hw(hisi_hba); 3108 } 3109 3110 return 0; 3111 } 3112 3113 static int hisi_sas_map_queues(struct Scsi_Host *shost) 3114 { 3115 struct hisi_hba *hisi_hba = shost_priv(shost); 3116 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 3117 3118 return blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, 3119 BASE_VECTORS_V3_HW); 3120 } 3121 3122 static struct scsi_host_template sht_v3_hw = { 3123 .name = DRV_NAME, 3124 .proc_name = DRV_NAME, 3125 .module = THIS_MODULE, 3126 .queuecommand = sas_queuecommand, 3127 .dma_need_drain = ata_scsi_dma_need_drain, 3128 .target_alloc = sas_target_alloc, 3129 .slave_configure = slave_configure_v3_hw, 3130 .scan_finished = hisi_sas_scan_finished, 3131 .scan_start = hisi_sas_scan_start, 3132 .map_queues = hisi_sas_map_queues, 3133 .change_queue_depth = sas_change_queue_depth, 3134 .bios_param = sas_bios_param, 3135 .this_id = -1, 3136 .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, 3137 .sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT, 3138 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 3139 .eh_device_reset_handler = sas_eh_device_reset_handler, 3140 .eh_target_reset_handler = sas_eh_target_reset_handler, 3141 .target_destroy = sas_target_destroy, 3142 .ioctl = sas_ioctl, 3143 #ifdef CONFIG_COMPAT 3144 .compat_ioctl = sas_ioctl, 
3145 #endif 3146 .shost_attrs = host_attrs_v3_hw, 3147 .tag_alloc_policy = BLK_TAG_ALLOC_RR, 3148 .host_reset = hisi_sas_host_reset, 3149 .host_tagset = 1, 3150 }; 3151 3152 static const struct hisi_sas_hw hisi_sas_v3_hw = { 3153 .setup_itct = setup_itct_v3_hw, 3154 .get_wideport_bitmap = get_wideport_bitmap_v3_hw, 3155 .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), 3156 .clear_itct = clear_itct_v3_hw, 3157 .sl_notify_ssp = sl_notify_ssp_v3_hw, 3158 .prep_ssp = prep_ssp_v3_hw, 3159 .prep_smp = prep_smp_v3_hw, 3160 .prep_stp = prep_ata_v3_hw, 3161 .prep_abort = prep_abort_v3_hw, 3162 .start_delivery = start_delivery_v3_hw, 3163 .phys_init = phys_init_v3_hw, 3164 .phy_start = start_phy_v3_hw, 3165 .phy_disable = disable_phy_v3_hw, 3166 .phy_hard_reset = phy_hard_reset_v3_hw, 3167 .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw, 3168 .phy_set_linkrate = phy_set_linkrate_v3_hw, 3169 .dereg_device = dereg_device_v3_hw, 3170 .soft_reset = soft_reset_v3_hw, 3171 .get_phys_state = get_phys_state_v3_hw, 3172 .get_events = phy_get_events_v3_hw, 3173 .write_gpio = write_gpio_v3_hw, 3174 .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw, 3175 }; 3176 3177 static struct Scsi_Host * 3178 hisi_sas_shost_alloc_pci(struct pci_dev *pdev) 3179 { 3180 struct Scsi_Host *shost; 3181 struct hisi_hba *hisi_hba; 3182 struct device *dev = &pdev->dev; 3183 3184 shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba)); 3185 if (!shost) { 3186 dev_err(dev, "shost alloc failed\n"); 3187 return NULL; 3188 } 3189 hisi_hba = shost_priv(shost); 3190 3191 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 3192 INIT_WORK(&hisi_hba->debugfs_work, debugfs_work_handler_v3_hw); 3193 hisi_hba->hw = &hisi_sas_v3_hw; 3194 hisi_hba->pci_dev = pdev; 3195 hisi_hba->dev = dev; 3196 hisi_hba->shost = shost; 3197 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 3198 3199 if (prot_mask & ~HISI_SAS_PROT_MASK) 3200 dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n", 3201 prot_mask); 3202 else 3203 hisi_hba->prot_mask = prot_mask; 3204 3205 if (hisi_sas_get_fw_info(hisi_hba) < 0) 3206 goto err_out; 3207 3208 if (hisi_sas_alloc(hisi_hba)) { 3209 hisi_sas_free(hisi_hba); 3210 goto err_out; 3211 } 3212 3213 return shost; 3214 err_out: 3215 scsi_host_put(shost); 3216 dev_err(dev, "shost alloc failed\n"); 3217 return NULL; 3218 } 3219 3220 static void debugfs_snapshot_cq_reg_v3_hw(struct hisi_hba *hisi_hba) 3221 { 3222 int queue_entry_size = hisi_hba->hw->complete_hdr_size; 3223 int dump_index = hisi_hba->debugfs_dump_index; 3224 int i; 3225 3226 for (i = 0; i < hisi_hba->queue_count; i++) 3227 memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr, 3228 hisi_hba->complete_hdr[i], 3229 HISI_SAS_QUEUE_SLOTS * queue_entry_size); 3230 } 3231 3232 static void debugfs_snapshot_dq_reg_v3_hw(struct hisi_hba *hisi_hba) 3233 { 3234 int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr); 3235 int dump_index = hisi_hba->debugfs_dump_index; 3236 int i; 3237 3238 for (i = 0; i < hisi_hba->queue_count; i++) { 3239 struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr; 3240 int j; 3241 3242 debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr; 3243 cmd_hdr = hisi_hba->cmd_hdr[i]; 3244 3245 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 3246 memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j], 3247 queue_entry_size); 3248 } 3249 } 3250 3251 static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba) 3252 { 3253 int dump_index = hisi_hba->debugfs_dump_index; 3254 const struct hisi_sas_debugfs_reg *port = 
&debugfs_port_reg; 3255 int i, phy_cnt; 3256 u32 offset; 3257 u32 *databuf; 3258 3259 for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) { 3260 databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data; 3261 for (i = 0; i < port->count; i++, databuf++) { 3262 offset = port->base_off + 4 * i; 3263 *databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt, 3264 offset); 3265 } 3266 } 3267 } 3268 3269 static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba) 3270 { 3271 int dump_index = hisi_hba->debugfs_dump_index; 3272 u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data; 3273 int i; 3274 3275 for (i = 0; i < debugfs_axi_reg.count; i++, databuf++) 3276 *databuf = hisi_sas_read32(hisi_hba, 4 * i); 3277 } 3278 3279 static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba) 3280 { 3281 int dump_index = hisi_hba->debugfs_dump_index; 3282 u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data; 3283 const struct hisi_sas_debugfs_reg *axi = &debugfs_axi_reg; 3284 int i; 3285 3286 for (i = 0; i < axi->count; i++, databuf++) 3287 *databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off); 3288 } 3289 3290 static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba) 3291 { 3292 int dump_index = hisi_hba->debugfs_dump_index; 3293 u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data; 3294 const struct hisi_sas_debugfs_reg *ras = &debugfs_ras_reg; 3295 int i; 3296 3297 for (i = 0; i < ras->count; i++, databuf++) 3298 *databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off); 3299 } 3300 3301 static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba) 3302 { 3303 int dump_index = hisi_hba->debugfs_dump_index; 3304 void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache; 3305 void *databuf = hisi_hba->debugfs_itct[dump_index].itct; 3306 struct hisi_sas_itct *itct; 3307 int i; 3308 3309 read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_ITCT_CACHE, cachebuf); 3310 3311 itct = hisi_hba->itct; 3312 3313 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { 3314 memcpy(databuf, itct, sizeof(struct hisi_sas_itct)); 3315 databuf += sizeof(struct hisi_sas_itct); 3316 } 3317 } 3318 3319 static void debugfs_snapshot_iost_reg_v3_hw(struct hisi_hba *hisi_hba) 3320 { 3321 int dump_index = hisi_hba->debugfs_dump_index; 3322 int max_command_entries = HISI_SAS_MAX_COMMANDS; 3323 void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache; 3324 void *databuf = hisi_hba->debugfs_iost[dump_index].iost; 3325 struct hisi_sas_iost *iost; 3326 int i; 3327 3328 read_iost_itct_cache_v3_hw(hisi_hba, HISI_SAS_IOST_CACHE, cachebuf); 3329 3330 iost = hisi_hba->iost; 3331 3332 for (i = 0; i < max_command_entries; i++, iost++) { 3333 memcpy(databuf, iost, sizeof(struct hisi_sas_iost)); 3334 databuf += sizeof(struct hisi_sas_iost); 3335 } 3336 } 3337 3338 static const char * 3339 debugfs_to_reg_name_v3_hw(int off, int base_off, 3340 const struct hisi_sas_debugfs_reg_lu *lu) 3341 { 3342 for (; lu->name; lu++) { 3343 if (off == lu->off - base_off) 3344 return lu->name; 3345 } 3346 3347 return NULL; 3348 } 3349 3350 static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s, 3351 const struct hisi_sas_debugfs_reg *reg) 3352 { 3353 int i; 3354 3355 for (i = 0; i < reg->count; i++) { 3356 int off = i * 4; 3357 const char *name; 3358 3359 name = debugfs_to_reg_name_v3_hw(off, reg->base_off, 3360 reg->lu); 3361 3362 if (name) 3363 seq_printf(s, "0x%08x 0x%08x %s\n", off, 3364 regs_val[i], name); 3365 else 3366 seq_printf(s, 
"0x%08x 0x%08x\n", off, 3367 regs_val[i]); 3368 } 3369 } 3370 3371 static int debugfs_global_v3_hw_show(struct seq_file *s, void *p) 3372 { 3373 struct hisi_sas_debugfs_regs *global = s->private; 3374 3375 debugfs_print_reg_v3_hw(global->data, s, 3376 &debugfs_global_reg); 3377 3378 return 0; 3379 } 3380 DEFINE_SHOW_ATTRIBUTE(debugfs_global_v3_hw); 3381 3382 static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p) 3383 { 3384 struct hisi_sas_debugfs_regs *axi = s->private; 3385 3386 debugfs_print_reg_v3_hw(axi->data, s, 3387 &debugfs_axi_reg); 3388 3389 return 0; 3390 } 3391 DEFINE_SHOW_ATTRIBUTE(debugfs_axi_v3_hw); 3392 3393 static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p) 3394 { 3395 struct hisi_sas_debugfs_regs *ras = s->private; 3396 3397 debugfs_print_reg_v3_hw(ras->data, s, 3398 &debugfs_ras_reg); 3399 3400 return 0; 3401 } 3402 DEFINE_SHOW_ATTRIBUTE(debugfs_ras_v3_hw); 3403 3404 static int debugfs_port_v3_hw_show(struct seq_file *s, void *p) 3405 { 3406 struct hisi_sas_debugfs_port *port = s->private; 3407 const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg; 3408 3409 debugfs_print_reg_v3_hw(port->data, s, reg_port); 3410 3411 return 0; 3412 } 3413 DEFINE_SHOW_ATTRIBUTE(debugfs_port_v3_hw); 3414 3415 static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index, 3416 int sz, __le64 *ptr) 3417 { 3418 int i; 3419 3420 /* completion header size not fixed per HW version */ 3421 seq_printf(s, "index %04d:\n\t", index); 3422 for (i = 1; i <= sz / 8; i++, ptr++) { 3423 seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr)); 3424 if (!(i % 2)) 3425 seq_puts(s, "\n\t"); 3426 } 3427 3428 seq_puts(s, "\n"); 3429 } 3430 3431 static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index, 3432 int sz, __le32 *ptr) 3433 { 3434 int i; 3435 3436 /* completion header size not fixed per HW version */ 3437 seq_printf(s, "index %04d:\n\t", index); 3438 for (i = 1; i <= sz / 4; i++, ptr++) { 3439 seq_printf(s, " 0x%08x", le32_to_cpu(*ptr)); 3440 if (!(i % 4)) 3441 seq_puts(s, "\n\t"); 3442 } 3443 seq_puts(s, "\n"); 3444 } 3445 3446 static void debugfs_cq_show_slot_v3_hw(struct seq_file *s, int slot, 3447 struct hisi_sas_debugfs_cq *debugfs_cq) 3448 { 3449 struct hisi_sas_cq *cq = debugfs_cq->cq; 3450 struct hisi_hba *hisi_hba = cq->hisi_hba; 3451 __le32 *complete_hdr = debugfs_cq->complete_hdr + 3452 (hisi_hba->hw->complete_hdr_size * slot); 3453 3454 debugfs_show_row_32_v3_hw(s, slot, 3455 hisi_hba->hw->complete_hdr_size, 3456 complete_hdr); 3457 } 3458 3459 static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p) 3460 { 3461 struct hisi_sas_debugfs_cq *debugfs_cq = s->private; 3462 int slot; 3463 3464 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) 3465 debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq); 3466 3467 return 0; 3468 } 3469 DEFINE_SHOW_ATTRIBUTE(debugfs_cq_v3_hw); 3470 3471 static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot, 3472 void *dq_ptr) 3473 { 3474 struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr; 3475 void *cmd_queue = debugfs_dq->hdr; 3476 __le32 *cmd_hdr = cmd_queue + 3477 sizeof(struct hisi_sas_cmd_hdr) * slot; 3478 3479 debugfs_show_row_32_v3_hw(s, slot, sizeof(struct hisi_sas_cmd_hdr), 3480 cmd_hdr); 3481 } 3482 3483 static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p) 3484 { 3485 int slot; 3486 3487 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) 3488 debugfs_dq_show_slot_v3_hw(s, slot, s->private); 3489 3490 return 0; 3491 } 3492 DEFINE_SHOW_ATTRIBUTE(debugfs_dq_v3_hw); 3493 3494 static int 
debugfs_iost_v3_hw_show(struct seq_file *s, void *p) 3495 { 3496 struct hisi_sas_debugfs_iost *debugfs_iost = s->private; 3497 struct hisi_sas_iost *iost = debugfs_iost->iost; 3498 int i, max_command_entries = HISI_SAS_MAX_COMMANDS; 3499 3500 for (i = 0; i < max_command_entries; i++, iost++) { 3501 __le64 *data = &iost->qw0; 3502 3503 debugfs_show_row_64_v3_hw(s, i, sizeof(*iost), data); 3504 } 3505 3506 return 0; 3507 } 3508 DEFINE_SHOW_ATTRIBUTE(debugfs_iost_v3_hw); 3509 3510 static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p) 3511 { 3512 struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private; 3513 struct hisi_sas_iost_itct_cache *iost_cache = 3514 debugfs_iost_cache->cache; 3515 u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; 3516 int i, tab_idx; 3517 __le64 *iost; 3518 3519 for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) { 3520 /* 3521 * Data struct of IOST cache: 3522 * Data[1]: BIT0~15: Table index 3523 * Bit16: Valid mask 3524 * Data[2]~[9]: IOST table 3525 */ 3526 tab_idx = (iost_cache->data[1] & 0xffff); 3527 iost = (__le64 *)iost_cache; 3528 3529 debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, iost); 3530 } 3531 3532 return 0; 3533 } 3534 DEFINE_SHOW_ATTRIBUTE(debugfs_iost_cache_v3_hw); 3535 3536 static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p) 3537 { 3538 int i; 3539 struct hisi_sas_debugfs_itct *debugfs_itct = s->private; 3540 struct hisi_sas_itct *itct = debugfs_itct->itct; 3541 3542 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { 3543 __le64 *data = &itct->qw0; 3544 3545 debugfs_show_row_64_v3_hw(s, i, sizeof(*itct), data); 3546 } 3547 3548 return 0; 3549 } 3550 DEFINE_SHOW_ATTRIBUTE(debugfs_itct_v3_hw); 3551 3552 static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) 3553 { 3554 struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private; 3555 struct hisi_sas_iost_itct_cache *itct_cache = 3556 debugfs_itct_cache->cache; 3557 u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; 3558 int i, tab_idx; 3559 __le64 *itct; 3560 3561 for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) { 3562 /* 3563 * Data struct of ITCT cache: 3564 * Data[1]: BIT0~15: Table index 3565 * Bit16: Valid mask 3566 * Data[2]~[9]: ITCT table 3567 */ 3568 tab_idx = itct_cache->data[1] & 0xffff; 3569 itct = (__le64 *)itct_cache; 3570 3571 debugfs_show_row_64_v3_hw(s, tab_idx, cache_size, itct); 3572 } 3573 3574 return 0; 3575 } 3576 DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw); 3577 3578 static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) 3579 { 3580 u64 *debugfs_timestamp; 3581 int dump_index = hisi_hba->debugfs_dump_index; 3582 struct dentry *dump_dentry; 3583 struct dentry *dentry; 3584 char name[256]; 3585 int p; 3586 int c; 3587 int d; 3588 3589 snprintf(name, 256, "%d", dump_index); 3590 3591 dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry); 3592 3593 debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index]; 3594 3595 debugfs_create_u64("timestamp", 0400, dump_dentry, 3596 debugfs_timestamp); 3597 3598 debugfs_create_file("global", 0400, dump_dentry, 3599 &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL], 3600 &debugfs_global_v3_hw_fops); 3601 3602 /* Create port dir and files */ 3603 dentry = debugfs_create_dir("port", dump_dentry); 3604 for (p = 0; p < hisi_hba->n_phy; p++) { 3605 snprintf(name, 256, "%d", p); 3606 3607 debugfs_create_file(name, 0400, dentry, 3608 &hisi_hba->debugfs_port_reg[dump_index][p], 3609 &debugfs_port_v3_hw_fops); 
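		/* one read-only dump file per phy under the "port" directory */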
	}

	/* Create CQ dir and files */
	dentry = debugfs_create_dir("cq", dump_dentry);
	for (c = 0; c < hisi_hba->queue_count; c++) {
		snprintf(name, 256, "%d", c);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_cq[dump_index][c],
				    &debugfs_cq_v3_hw_fops);
	}

	/* Create DQ dir and files */
	dentry = debugfs_create_dir("dq", dump_dentry);
	for (d = 0; d < hisi_hba->queue_count; d++) {
		snprintf(name, 256, "%d", d);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_dq[dump_index][d],
				    &debugfs_dq_v3_hw_fops);
	}

	debugfs_create_file("iost", 0400, dump_dentry,
			    &hisi_hba->debugfs_iost[dump_index],
			    &debugfs_iost_v3_hw_fops);

	debugfs_create_file("iost_cache", 0400, dump_dentry,
			    &hisi_hba->debugfs_iost_cache[dump_index],
			    &debugfs_iost_cache_v3_hw_fops);

	debugfs_create_file("itct", 0400, dump_dentry,
			    &hisi_hba->debugfs_itct[dump_index],
			    &debugfs_itct_v3_hw_fops);

	debugfs_create_file("itct_cache", 0400, dump_dentry,
			    &hisi_hba->debugfs_itct_cache[dump_index],
			    &debugfs_itct_cache_v3_hw_fops);

	debugfs_create_file("axi", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
			    &debugfs_axi_v3_hw_fops);

	debugfs_create_file("ras", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
			    &debugfs_ras_v3_hw_fops);
}

static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
{
	debugfs_snapshot_prepare_v3_hw(hisi_hba);

	debugfs_snapshot_global_reg_v3_hw(hisi_hba);
	debugfs_snapshot_port_reg_v3_hw(hisi_hba);
	debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
	debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
	debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
	debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
	debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
	debugfs_snapshot_iost_reg_v3_hw(hisi_hba);

	debugfs_create_files_v3_hw(hisi_hba);

	debugfs_snapshot_restore_v3_hw(hisi_hba);
}

static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
						const char __user *user_buf,
						size_t count, loff_t *ppos)
{
	struct hisi_hba *hisi_hba = file->f_inode->i_private;
	char buf[8];

	if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
		return -EFAULT;

	if (count > 8)
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	if (buf[0] != '1')
		return -EFAULT;

	queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	return count;
}

static const struct file_operations debugfs_trigger_dump_v3_hw_fops = {
	.write = &debugfs_trigger_dump_v3_hw_write,
	.owner = THIS_MODULE,
};

enum {
	HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
	HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
	HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
};

static const struct {
	int value;
	char *name;
} debugfs_loop_linkrate_v3_hw[] = {
	{ SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
	{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
	{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
	{ SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
};
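/*
 * The BIST selector files ("link_rate", "code_mode", "loopback_mode") follow
 * the same pattern: reading lists the supported names with the current
 * selection in brackets, and writing one of the listed names selects it.
 * Writes are rejected with -EPERM while BIST is enabled. For example (the
 * debugfs path shown is only an illustration, not fixed by this file):
 *	echo "3.0 Gbit" > .../<host>/bist/link_rate
 */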
static int debugfs_bist_linkrate_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
		int match = (hisi_hba->debugfs_bist_linkrate ==
			     debugfs_loop_linkrate_v3_hw[i].value);

		seq_printf(s, "%s%s%s ", match ? "[" : "",
			   debugfs_loop_linkrate_v3_hw[i].name,
			   match ? "]" : "");
	}
	seq_puts(s, "\n");

	return 0;
}

static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
						 const char __user *buf,
						 size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	char kbuf[16] = {}, *pkbuf;
	bool found = false;
	int i;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	if (count >= sizeof(kbuf))
		return -EOVERFLOW;

	if (copy_from_user(kbuf, buf, count))
		return -EINVAL;

	pkbuf = strstrip(kbuf);

	for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
		if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name,
			     pkbuf, 16)) {
			hisi_hba->debugfs_bist_linkrate =
				debugfs_loop_linkrate_v3_hw[i].value;
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return count;
}

static int debugfs_bist_linkrate_v3_hw_open(struct inode *inode,
					    struct file *filp)
{
	return single_open(filp, debugfs_bist_linkrate_v3_hw_show,
			   inode->i_private);
}

static const struct file_operations debugfs_bist_linkrate_v3_hw_fops = {
	.open = debugfs_bist_linkrate_v3_hw_open,
	.read = seq_read,
	.write = debugfs_bist_linkrate_v3_hw_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static const struct {
	int value;
	char *name;
} debugfs_loop_code_mode_v3_hw[] = {
	{ HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
	{ HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
	{ HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
	{ HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
	{ HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
	{ HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
	{ HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
	{ HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
	{ HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
	{ HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
	{ HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
	{ HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
};

static int debugfs_bist_code_mode_v3_hw_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) {
		int match = (hisi_hba->debugfs_bist_code_mode ==
			     debugfs_loop_code_mode_v3_hw[i].value);

		seq_printf(s, "%s%s%s ", match ? "[" : "",
			   debugfs_loop_code_mode_v3_hw[i].name,
"]" : ""); 3821 } 3822 seq_puts(s, "\n"); 3823 3824 return 0; 3825 } 3826 3827 static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp, 3828 const char __user *buf, 3829 size_t count, 3830 loff_t *ppos) 3831 { 3832 struct seq_file *m = filp->private_data; 3833 struct hisi_hba *hisi_hba = m->private; 3834 char kbuf[16] = {}, *pkbuf; 3835 bool found = false; 3836 int i; 3837 3838 if (hisi_hba->debugfs_bist_enable) 3839 return -EPERM; 3840 3841 if (count >= sizeof(kbuf)) 3842 return -EINVAL; 3843 3844 if (copy_from_user(kbuf, buf, count)) 3845 return -EOVERFLOW; 3846 3847 pkbuf = strstrip(kbuf); 3848 3849 for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) { 3850 if (!strncmp(debugfs_loop_code_mode_v3_hw[i].name, 3851 pkbuf, 16)) { 3852 hisi_hba->debugfs_bist_code_mode = 3853 debugfs_loop_code_mode_v3_hw[i].value; 3854 found = true; 3855 break; 3856 } 3857 } 3858 3859 if (!found) 3860 return -EINVAL; 3861 3862 return count; 3863 } 3864 3865 static int debugfs_bist_code_mode_v3_hw_open(struct inode *inode, 3866 struct file *filp) 3867 { 3868 return single_open(filp, debugfs_bist_code_mode_v3_hw_show, 3869 inode->i_private); 3870 } 3871 3872 static const struct file_operations debugfs_bist_code_mode_v3_hw_fops = { 3873 .open = debugfs_bist_code_mode_v3_hw_open, 3874 .read = seq_read, 3875 .write = debugfs_bist_code_mode_v3_hw_write, 3876 .llseek = seq_lseek, 3877 .release = single_release, 3878 .owner = THIS_MODULE, 3879 }; 3880 3881 static ssize_t debugfs_bist_phy_v3_hw_write(struct file *filp, 3882 const char __user *buf, 3883 size_t count, loff_t *ppos) 3884 { 3885 struct seq_file *m = filp->private_data; 3886 struct hisi_hba *hisi_hba = m->private; 3887 unsigned int phy_no; 3888 int val; 3889 3890 if (hisi_hba->debugfs_bist_enable) 3891 return -EPERM; 3892 3893 val = kstrtouint_from_user(buf, count, 0, &phy_no); 3894 if (val) 3895 return val; 3896 3897 if (phy_no >= hisi_hba->n_phy) 3898 return -EINVAL; 3899 3900 hisi_hba->debugfs_bist_phy_no = phy_no; 3901 3902 return count; 3903 } 3904 3905 static int debugfs_bist_phy_v3_hw_show(struct seq_file *s, void *p) 3906 { 3907 struct hisi_hba *hisi_hba = s->private; 3908 3909 seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no); 3910 3911 return 0; 3912 } 3913 3914 static int debugfs_bist_phy_v3_hw_open(struct inode *inode, 3915 struct file *filp) 3916 { 3917 return single_open(filp, debugfs_bist_phy_v3_hw_show, 3918 inode->i_private); 3919 } 3920 3921 static const struct file_operations debugfs_bist_phy_v3_hw_fops = { 3922 .open = debugfs_bist_phy_v3_hw_open, 3923 .read = seq_read, 3924 .write = debugfs_bist_phy_v3_hw_write, 3925 .llseek = seq_lseek, 3926 .release = single_release, 3927 .owner = THIS_MODULE, 3928 }; 3929 3930 static const struct { 3931 int value; 3932 char *name; 3933 } debugfs_loop_modes_v3_hw[] = { 3934 { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" }, 3935 { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" }, 3936 { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" }, 3937 }; 3938 3939 static int debugfs_bist_mode_v3_hw_show(struct seq_file *s, void *p) 3940 { 3941 struct hisi_hba *hisi_hba = s->private; 3942 int i; 3943 3944 for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) { 3945 int match = (hisi_hba->debugfs_bist_mode == 3946 debugfs_loop_modes_v3_hw[i].value); 3947 3948 seq_printf(s, "%s%s%s ", match ? "[" : "", 3949 debugfs_loop_modes_v3_hw[i].name, 3950 match ? 
"]" : ""); 3951 } 3952 seq_puts(s, "\n"); 3953 3954 return 0; 3955 } 3956 3957 static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp, 3958 const char __user *buf, 3959 size_t count, loff_t *ppos) 3960 { 3961 struct seq_file *m = filp->private_data; 3962 struct hisi_hba *hisi_hba = m->private; 3963 char kbuf[16] = {}, *pkbuf; 3964 bool found = false; 3965 int i; 3966 3967 if (hisi_hba->debugfs_bist_enable) 3968 return -EPERM; 3969 3970 if (count >= sizeof(kbuf)) 3971 return -EINVAL; 3972 3973 if (copy_from_user(kbuf, buf, count)) 3974 return -EOVERFLOW; 3975 3976 pkbuf = strstrip(kbuf); 3977 3978 for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) { 3979 if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) { 3980 hisi_hba->debugfs_bist_mode = 3981 debugfs_loop_modes_v3_hw[i].value; 3982 found = true; 3983 break; 3984 } 3985 } 3986 3987 if (!found) 3988 return -EINVAL; 3989 3990 return count; 3991 } 3992 3993 static int debugfs_bist_mode_v3_hw_open(struct inode *inode, 3994 struct file *filp) 3995 { 3996 return single_open(filp, debugfs_bist_mode_v3_hw_show, 3997 inode->i_private); 3998 } 3999 4000 static const struct file_operations debugfs_bist_mode_v3_hw_fops = { 4001 .open = debugfs_bist_mode_v3_hw_open, 4002 .read = seq_read, 4003 .write = debugfs_bist_mode_v3_hw_write, 4004 .llseek = seq_lseek, 4005 .release = single_release, 4006 .owner = THIS_MODULE, 4007 }; 4008 4009 static ssize_t debugfs_bist_enable_v3_hw_write(struct file *filp, 4010 const char __user *buf, 4011 size_t count, loff_t *ppos) 4012 { 4013 struct seq_file *m = filp->private_data; 4014 struct hisi_hba *hisi_hba = m->private; 4015 unsigned int enable; 4016 int val; 4017 4018 val = kstrtouint_from_user(buf, count, 0, &enable); 4019 if (val) 4020 return val; 4021 4022 if (enable > 1) 4023 return -EINVAL; 4024 4025 if (enable == hisi_hba->debugfs_bist_enable) 4026 return count; 4027 4028 val = debugfs_set_bist_v3_hw(hisi_hba, enable); 4029 if (val < 0) 4030 return val; 4031 4032 hisi_hba->debugfs_bist_enable = enable; 4033 4034 return count; 4035 } 4036 4037 static int debugfs_bist_enable_v3_hw_show(struct seq_file *s, void *p) 4038 { 4039 struct hisi_hba *hisi_hba = s->private; 4040 4041 seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable); 4042 4043 return 0; 4044 } 4045 4046 static int debugfs_bist_enable_v3_hw_open(struct inode *inode, 4047 struct file *filp) 4048 { 4049 return single_open(filp, debugfs_bist_enable_v3_hw_show, 4050 inode->i_private); 4051 } 4052 4053 static const struct file_operations debugfs_bist_enable_v3_hw_fops = { 4054 .open = debugfs_bist_enable_v3_hw_open, 4055 .read = seq_read, 4056 .write = debugfs_bist_enable_v3_hw_write, 4057 .llseek = seq_lseek, 4058 .release = single_release, 4059 .owner = THIS_MODULE, 4060 }; 4061 4062 static const struct { 4063 char *name; 4064 } debugfs_ffe_name_v3_hw[FFE_CFG_MAX] = { 4065 { "SAS_1_5_GBPS" }, 4066 { "SAS_3_0_GBPS" }, 4067 { "SAS_6_0_GBPS" }, 4068 { "SAS_12_0_GBPS" }, 4069 { "FFE_RESV" }, 4070 { "SATA_1_5_GBPS" }, 4071 { "SATA_3_0_GBPS" }, 4072 { "SATA_6_0_GBPS" }, 4073 }; 4074 4075 static ssize_t debugfs_v3_hw_write(struct file *filp, 4076 const char __user *buf, 4077 size_t count, loff_t *ppos) 4078 { 4079 struct seq_file *m = filp->private_data; 4080 u32 *val = m->private; 4081 int res; 4082 4083 res = kstrtouint_from_user(buf, count, 0, val); 4084 if (res) 4085 return res; 4086 4087 return count; 4088 } 4089 4090 static int debugfs_v3_hw_show(struct seq_file *s, void *p) 4091 { 4092 u32 *val = s->private; 4093 4094 
seq_printf(s, "0x%x\n", *val); 4095 4096 return 0; 4097 } 4098 4099 static int debugfs_v3_hw_open(struct inode *inode, struct file *filp) 4100 { 4101 return single_open(filp, debugfs_v3_hw_show, 4102 inode->i_private); 4103 } 4104 4105 static const struct file_operations debugfs_v3_hw_fops = { 4106 .open = debugfs_v3_hw_open, 4107 .read = seq_read, 4108 .write = debugfs_v3_hw_write, 4109 .llseek = seq_lseek, 4110 .release = single_release, 4111 .owner = THIS_MODULE, 4112 }; 4113 4114 static ssize_t debugfs_phy_down_cnt_v3_hw_write(struct file *filp, 4115 const char __user *buf, 4116 size_t count, loff_t *ppos) 4117 { 4118 struct seq_file *s = filp->private_data; 4119 struct hisi_sas_phy *phy = s->private; 4120 unsigned int set_val; 4121 int res; 4122 4123 res = kstrtouint_from_user(buf, count, 0, &set_val); 4124 if (res) 4125 return res; 4126 4127 if (set_val > 0) 4128 return -EINVAL; 4129 4130 atomic_set(&phy->down_cnt, 0); 4131 4132 return count; 4133 } 4134 4135 static int debugfs_phy_down_cnt_v3_hw_show(struct seq_file *s, void *p) 4136 { 4137 struct hisi_sas_phy *phy = s->private; 4138 4139 seq_printf(s, "%d\n", atomic_read(&phy->down_cnt)); 4140 4141 return 0; 4142 } 4143 4144 static int debugfs_phy_down_cnt_v3_hw_open(struct inode *inode, 4145 struct file *filp) 4146 { 4147 return single_open(filp, debugfs_phy_down_cnt_v3_hw_show, 4148 inode->i_private); 4149 } 4150 4151 static const struct file_operations debugfs_phy_down_cnt_v3_hw_fops = { 4152 .open = debugfs_phy_down_cnt_v3_hw_open, 4153 .read = seq_read, 4154 .write = debugfs_phy_down_cnt_v3_hw_write, 4155 .llseek = seq_lseek, 4156 .release = single_release, 4157 .owner = THIS_MODULE, 4158 }; 4159 4160 static void debugfs_work_handler_v3_hw(struct work_struct *work) 4161 { 4162 struct hisi_hba *hisi_hba = 4163 container_of(work, struct hisi_hba, debugfs_work); 4164 int debugfs_dump_index = hisi_hba->debugfs_dump_index; 4165 struct device *dev = hisi_hba->dev; 4166 u64 timestamp = local_clock(); 4167 4168 if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) { 4169 dev_warn(dev, "dump count exceeded!\n"); 4170 return; 4171 } 4172 4173 do_div(timestamp, NSEC_PER_MSEC); 4174 hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp; 4175 4176 debugfs_snapshot_regs_v3_hw(hisi_hba); 4177 hisi_hba->debugfs_dump_index++; 4178 } 4179 4180 static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index) 4181 { 4182 struct device *dev = hisi_hba->dev; 4183 int i; 4184 4185 devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache); 4186 devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache); 4187 devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost); 4188 devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct); 4189 4190 for (i = 0; i < hisi_hba->queue_count; i++) 4191 devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr); 4192 4193 for (i = 0; i < hisi_hba->queue_count; i++) 4194 devm_kfree(dev, 4195 hisi_hba->debugfs_cq[dump_index][i].complete_hdr); 4196 4197 for (i = 0; i < DEBUGFS_REGS_NUM; i++) 4198 devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data); 4199 4200 for (i = 0; i < hisi_hba->n_phy; i++) 4201 devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data); 4202 } 4203 4204 static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = { 4205 [DEBUGFS_GLOBAL] = &debugfs_global_reg, 4206 [DEBUGFS_AXI] = &debugfs_axi_reg, 4207 [DEBUGFS_RAS] = &debugfs_ras_reg, 4208 }; 4209 4210 static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index) 4211 { 
static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
{
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	struct device *dev = hisi_hba->dev;
	int p, c, d, r, i;
	size_t sz;

	for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
		struct hisi_sas_debugfs_regs *regs =
				&hisi_hba->debugfs_regs[dump_index][r];

		sz = debugfs_reg_array_v3_hw[r]->count * 4;
		regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!regs->data)
			goto fail;
		regs->hisi_hba = hisi_hba;
	}

	sz = debugfs_port_reg.count * 4;
	for (p = 0; p < hisi_hba->n_phy; p++) {
		struct hisi_sas_debugfs_port *port =
				&hisi_hba->debugfs_port_reg[dump_index][p];

		port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!port->data)
			goto fail;
		port->phy = &hisi_hba->phy[p];
	}

	sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
	for (c = 0; c < hisi_hba->queue_count; c++) {
		struct hisi_sas_debugfs_cq *cq =
				&hisi_hba->debugfs_cq[dump_index][c];

		cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!cq->complete_hdr)
			goto fail;
		cq->cq = &hisi_hba->cq[c];
	}

	sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
	for (d = 0; d < hisi_hba->queue_count; d++) {
		struct hisi_sas_debugfs_dq *dq =
				&hisi_hba->debugfs_dq[dump_index][d];

		dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!dq->hdr)
			goto fail;
		dq->dq = &hisi_hba->dq[d];
	}

	sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);

	hisi_hba->debugfs_iost[dump_index].iost =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost[dump_index].iost)
		goto fail;

	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);

	hisi_hba->debugfs_iost_cache[dump_index].cache =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
		goto fail;

	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);

	hisi_hba->debugfs_itct_cache[dump_index].cache =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
		goto fail;

	/* New memory allocations must be located before itct */
	sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);

	hisi_hba->debugfs_itct[dump_index].itct =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct[dump_index].itct)
		goto fail;

	return 0;
fail:
	for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
		debugfs_release_v3_hw(hisi_hba, i);
	return -ENOMEM;
}

static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct dentry *dir = debugfs_create_dir("phy_down_cnt",
						hisi_hba->debugfs_dir);
	char name[16];
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		snprintf(name, 16, "%d", phy_no);
		debugfs_create_file(name, 0600, dir,
				    &hisi_hba->phy[phy_no],
				    &debugfs_phy_down_cnt_v3_hw_fops);
	}
}
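/*
 * The "bist" directory created below groups the loopback test controls:
 * link_rate, code_mode, fixed_code, fixed_code_1, phy_id, cnt, loopback_mode
 * and enable, plus a per-phy "port/<n>/ffe/" directory holding one tunable
 * file per FFE setting (the reserved slot is skipped).
 */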
&debugfs_bist_code_mode_v3_hw_fops); 4328 4329 debugfs_create_file("fixed_code", 0600, 4330 hisi_hba->debugfs_bist_dentry, 4331 &hisi_hba->debugfs_bist_fixed_code[0], 4332 &debugfs_v3_hw_fops); 4333 4334 debugfs_create_file("fixed_code_1", 0600, 4335 hisi_hba->debugfs_bist_dentry, 4336 &hisi_hba->debugfs_bist_fixed_code[1], 4337 &debugfs_v3_hw_fops); 4338 4339 debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry, 4340 hisi_hba, &debugfs_bist_phy_v3_hw_fops); 4341 4342 debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry, 4343 &hisi_hba->debugfs_bist_cnt); 4344 4345 debugfs_create_file("loopback_mode", 0600, 4346 hisi_hba->debugfs_bist_dentry, 4347 hisi_hba, &debugfs_bist_mode_v3_hw_fops); 4348 4349 debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry, 4350 hisi_hba, &debugfs_bist_enable_v3_hw_fops); 4351 4352 ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry); 4353 4354 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 4355 struct dentry *port_dentry; 4356 struct dentry *ffe_dentry; 4357 char name[256]; 4358 int i; 4359 4360 snprintf(name, 256, "%d", phy_no); 4361 port_dentry = debugfs_create_dir(name, ports_dentry); 4362 ffe_dentry = debugfs_create_dir("ffe", port_dentry); 4363 for (i = 0; i < FFE_CFG_MAX; i++) { 4364 if (i == FFE_RESV) 4365 continue; 4366 debugfs_create_file(debugfs_ffe_name_v3_hw[i].name, 4367 0600, ffe_dentry, 4368 &hisi_hba->debugfs_bist_ffe[phy_no][i], 4369 &debugfs_v3_hw_fops); 4370 } 4371 } 4372 4373 hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS; 4374 } 4375 4376 static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba) 4377 { 4378 struct device *dev = hisi_hba->dev; 4379 int i; 4380 4381 hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), 4382 hisi_sas_debugfs_dir); 4383 debugfs_create_file("trigger_dump", 0200, 4384 hisi_hba->debugfs_dir, 4385 hisi_hba, 4386 &debugfs_trigger_dump_v3_hw_fops); 4387 4388 /* create bist structures */ 4389 debugfs_bist_init_v3_hw(hisi_hba); 4390 4391 hisi_hba->debugfs_dump_dentry = 4392 debugfs_create_dir("dump", hisi_hba->debugfs_dir); 4393 4394 debugfs_phy_down_cnt_init_v3_hw(hisi_hba); 4395 4396 for (i = 0; i < hisi_sas_debugfs_dump_count; i++) { 4397 if (debugfs_alloc_v3_hw(hisi_hba, i)) { 4398 debugfs_remove_recursive(hisi_hba->debugfs_dir); 4399 dev_dbg(dev, "failed to init debugfs!\n"); 4400 break; 4401 } 4402 } 4403 } 4404 4405 static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba) 4406 { 4407 debugfs_remove_recursive(hisi_hba->debugfs_dir); 4408 } 4409 4410 static int 4411 hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) 4412 { 4413 struct Scsi_Host *shost; 4414 struct hisi_hba *hisi_hba; 4415 struct device *dev = &pdev->dev; 4416 struct asd_sas_phy **arr_phy; 4417 struct asd_sas_port **arr_port; 4418 struct sas_ha_struct *sha; 4419 int rc, phy_nr, port_nr, i; 4420 4421 rc = pci_enable_device(pdev); 4422 if (rc) 4423 goto err_out; 4424 4425 pci_set_master(pdev); 4426 4427 rc = pci_request_regions(pdev, DRV_NAME); 4428 if (rc) 4429 goto err_out_disable_device; 4430 4431 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 4432 if (rc) 4433 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 4434 if (rc) { 4435 dev_err(dev, "No usable DMA addressing method\n"); 4436 rc = -ENODEV; 4437 goto err_out_regions; 4438 } 4439 4440 shost = hisi_sas_shost_alloc_pci(pdev); 4441 if (!shost) { 4442 rc = -ENOMEM; 4443 goto err_out_regions; 4444 } 4445 4446 sha = SHOST_TO_SAS_HA(shost); 4447 hisi_hba = 
	dev_set_drvdata(dev, sha);

	hisi_hba->regs = pcim_iomap(pdev, 5, 0);
	if (!hisi_hba->regs) {
		dev_err(dev, "cannot map register\n");
		rc = -ENOMEM;
		goto err_out_ha;
	}

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
	shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	if (hisi_hba->prot_mask) {
		dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
			 prot_mask);
		scsi_host_set_prot(hisi_hba->shost, prot_mask);
		if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
			scsi_host_set_guard(hisi_hba->shost,
					    SHOST_DIX_GUARD_CRC);
	}

	if (hisi_sas_debugfs_enable)
		debugfs_init_v3_hw(hisi_hba);

	rc = interrupt_preinit_v3_hw(hisi_hba);
	if (rc)
		goto err_out_debugfs;
	dev_err(dev, "%d hw queues\n", shost->nr_hw_queues);
	rc = scsi_add_host(shost, dev);
	if (rc)
		goto err_out_free_irq_vectors;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_sas_v3_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	/*
	 * When ATA disks are connected to the SAS controller, an additional
	 * ata_port is created, which affects the child_count of
	 * hisi_hba->dev. Even if all the disks are suspended, ata_port still
	 * exists and the child_count of hisi_hba->dev is not 0. So use
	 * pm_suspend_ignore_children() to ignore its effect on hisi_hba->dev.
	 */
	pm_suspend_ignore_children(dev, true);
	pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_out_debugfs:
	debugfs_exit_v3_hw(hisi_hba);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out:
	return rc;
}
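/*
 * Interrupt vector layout assumed by the teardown below: vectors 1, 2 and 11
 * carry the non-queue interrupts requested elsewhere in this file, and the
 * completion-queue vectors start at 16. When interrupt converge is enabled
 * (hisi_sas_intr_conv), all CQs share vector 16 instead of 16 + i.
 */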
static void
hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
	int i;

	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		int nr = hisi_sas_intr_conv ? 16 : 16 + i;

		free_irq(pci_irq_vector(pdev, nr), cq);
	}
	pci_free_irq_vectors(pdev);
}

static void hisi_sas_v3_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sas_ha_struct *sha = dev_get_drvdata(dev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	pm_runtime_get_noresume(dev);
	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	hisi_sas_free(hisi_hba);
	debugfs_exit_v3_hw(hisi_hba);
	scsi_host_put(shost);
}

static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	int rc;

	dev_info(dev, "FLR prepare\n");
	set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = disable_host_v3_hw(hisi_hba);
	if (rc)
		dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
}

static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	int rc;

	hisi_sas_init_mem(hisi_hba);

	rc = hw_init_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
		return;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "FLR done\n");
}

enum {
	/* instances of the controller */
	hip08,
};

static int _suspend_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (!pdev->pm_cap) {
		dev_err(dev, "PCI PM not supported\n");
		return -ENODEV;
	}

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	flush_workqueue(hisi_hba->wq);

	rc = disable_host_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	dev_warn(dev, "entering suspend state\n");

	hisi_sas_release_tasks(hisi_hba);

	sas_suspend_ha(sha);
	return 0;
}
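/*
 * _suspend_v3_hw() and _resume_v3_hw() are the shared workers; the
 * __maybe_unused suspend_v3_hw()/resume_v3_hw() wrappers further down set
 * and clear HISI_SAS_PM_BIT around them for the dev_pm_ops table.
 */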
static int _resume_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = hisi_hba->shost;
	struct device *dev = hisi_hba->dev;
	int rc;
	pci_power_t device_state = pdev->current_state;

	dev_warn(dev, "resuming from operating state [D%d]\n",
		 device_state);

	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	sas_prep_resume_ha(sha);
	rc = hw_init_v3_hw(hisi_hba);
	if (rc) {
		scsi_remove_host(shost);
		return rc;
	}
	phys_init_v3_hw(hisi_hba);
	sas_resume_ha(sha);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return 0;
}

static int __maybe_unused suspend_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	int rc;

	set_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);

	rc = _suspend_v3_hw(device);
	if (rc)
		clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);

	return rc;
}

static int __maybe_unused resume_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	int rc = _resume_v3_hw(device);

	clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);

	return rc;
}

static const struct pci_device_id sas_v3_pci_table[] = {
	{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
	{}
};
MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);

static const struct pci_error_handlers hisi_sas_err_handler = {
	.reset_prepare = hisi_sas_reset_prepare_v3_hw,
	.reset_done = hisi_sas_reset_done_v3_hw,
};

static UNIVERSAL_DEV_PM_OPS(hisi_sas_v3_pm_ops,
			    suspend_v3_hw,
			    resume_v3_hw,
			    NULL);

static struct pci_driver sas_v3_pci_driver = {
	.name = DRV_NAME,
	.id_table = sas_v3_pci_table,
	.probe = hisi_sas_v3_probe,
	.remove = hisi_sas_v3_remove,
	.err_handler = &hisi_sas_err_handler,
	.driver.pm = &hisi_sas_v3_pm_ops,
};

module_pci_driver(sas_v3_pci_driver);
module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
MODULE_ALIAS("pci:" DRV_NAME);