// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2017 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas_v3_hw"

/* global registers need init */
/* Offsets below are relative to hisi_hba->regs (see hisi_sas_read32()). */
#define DLVRY_QUEUE_ENABLE		0x0
#define IOST_BASE_ADDR_LO		0x8
#define IOST_BASE_ADDR_HI		0xc
#define ITCT_BASE_ADDR_LO		0x10
#define ITCT_BASE_ADDR_HI		0x14
#define IO_BROKEN_MSG_ADDR_LO		0x18
#define IO_BROKEN_MSG_ADDR_HI		0x1c
#define PHY_CONTEXT			0x20
#define PHY_STATE			0x24
#define PHY_PORT_NUM_MA			0x28
#define PHY_CONN_RATE			0x30
/* ITCT_CLR: write ITCT_CLR_EN + device id to invalidate one ITCT entry */
#define ITCT_CLR			0x44
#define ITCT_CLR_EN_OFF			16
#define ITCT_CLR_EN_MSK			(0x1 << ITCT_CLR_EN_OFF)
#define ITCT_DEV_OFF			0
#define ITCT_DEV_MSK			(0x7ff << ITCT_DEV_OFF)
#define SAS_AXI_USER3			0x50
#define IO_SATA_BROKEN_MSG_ADDR_LO	0x58
#define IO_SATA_BROKEN_MSG_ADDR_HI	0x5c
#define SATA_INITI_D2H_STORE_ADDR_LO	0x60
#define SATA_INITI_D2H_STORE_ADDR_HI	0x64
#define CFG_MAX_TAG			0x68
#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL	0x84
#define HGC_SAS_TXFAIL_RETRY_CTRL	0x88
#define HGC_GET_ITV_TIME		0x90
#define DEVICE_MSG_WORK_MODE		0x94
#define OPENA_WT_CONTI_TIME		0x9c
#define I_T_NEXUS_LOSS_TIME		0xa0
#define MAX_CON_TIME_LIMIT_TIME		0xa4
#define BUS_INACTIVE_LIMIT_TIME		0xa8
#define REJECT_TO_OPEN_LIMIT_TIME	0xac
#define CQ_INT_CONVERGE_EN		0xb0
#define CFG_AGING_TIME			0xbc
#define HGC_DFX_CFG2			0xc0
/* abort-by-IPTT interface; see dereg_device_v3_hw() */
#define CFG_ABT_SET_QUERY_IPTT		0xd4
#define CFG_SET_ABORTED_IPTT_OFF	0
#define CFG_SET_ABORTED_IPTT_MSK	(0xfff << CFG_SET_ABORTED_IPTT_OFF)
#define CFG_SET_ABORTED_EN_OFF		12
#define CFG_ABT_SET_IPTT_DONE		0xd8
#define CFG_ABT_SET_IPTT_DONE_OFF	0
#define HGC_IOMB_PROC1_STATUS		0x104
#define HGC_LM_DFX_STATUS2		0x128
#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF		0
#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK	(0xfff << \
					 HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF		12
#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK	(0x7ff << \
					 HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
/* ECC error address/status registers (1B = single-bit, MB = multi-bit) */
#define HGC_CQE_ECC_ADDR		0x13c
#define HGC_CQE_ECC_1B_ADDR_OFF		0
#define HGC_CQE_ECC_1B_ADDR_MSK		(0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
#define HGC_CQE_ECC_MB_ADDR_OFF		8
#define HGC_CQE_ECC_MB_ADDR_MSK		(0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
#define HGC_IOST_ECC_ADDR		0x140
#define HGC_IOST_ECC_1B_ADDR_OFF	0
#define HGC_IOST_ECC_1B_ADDR_MSK	(0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
#define HGC_IOST_ECC_MB_ADDR_OFF	16
#define HGC_IOST_ECC_MB_ADDR_MSK	(0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
#define HGC_DQE_ECC_ADDR		0x144
#define HGC_DQE_ECC_1B_ADDR_OFF		0
#define HGC_DQE_ECC_1B_ADDR_MSK		(0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
#define HGC_DQE_ECC_MB_ADDR_OFF		16
#define HGC_DQE_ECC_MB_ADDR_MSK		(0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define CHNL_INT_STATUS			0x148
#define TAB_DFX				0x14c
#define HGC_ITCT_ECC_ADDR		0x150
#define HGC_ITCT_ECC_1B_ADDR_OFF	0
#define HGC_ITCT_ECC_1B_ADDR_MSK	(0x3ff << \
					 HGC_ITCT_ECC_1B_ADDR_OFF)
#define HGC_ITCT_ECC_MB_ADDR_OFF	16
#define HGC_ITCT_ECC_MB_ADDR_MSK	(0x3ff << \
					 HGC_ITCT_ECC_MB_ADDR_OFF)
#define HGC_AXI_FIFO_ERR_INFO		0x154
#define AXI_ERR_INFO_OFF		0
#define AXI_ERR_INFO_MSK		(0xff << AXI_ERR_INFO_OFF)
#define FIFO_ERR_INFO_OFF		8
#define FIFO_ERR_INFO_MSK		(0xff << FIFO_ERR_INFO_OFF)
#define TAB_RD_TYPE			0x15c
/* interrupt coalescing and interrupt source/mask registers */
#define INT_COAL_EN			0x19c
#define OQ_INT_COAL_TIME		0x1a0
#define OQ_INT_COAL_CNT			0x1a4
#define ENT_INT_COAL_TIME		0x1a8
#define ENT_INT_COAL_CNT		0x1ac
#define OQ_INT_SRC			0x1b0
#define OQ_INT_SRC_MSK			0x1b4
#define ENT_INT_SRC1			0x1b8
#define ENT_INT_SRC1_D2H_FIS_CH0_OFF	0
#define ENT_INT_SRC1_D2H_FIS_CH0_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
#define ENT_INT_SRC1_D2H_FIS_CH1_OFF	8
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2			0x1bc
#define ENT_INT_SRC3			0x1c0
#define ENT_INT_SRC3_WP_DEPTH_OFF		8
#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF	9
#define ENT_INT_SRC3_RP_DEPTH_OFF 10 105 #define ENT_INT_SRC3_AXI_OFF 11 106 #define ENT_INT_SRC3_FIFO_OFF 12 107 #define ENT_INT_SRC3_LM_OFF 14 108 #define ENT_INT_SRC3_ITC_INT_OFF 15 109 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) 110 #define ENT_INT_SRC3_ABT_OFF 16 111 #define ENT_INT_SRC3_DQE_POISON_OFF 18 112 #define ENT_INT_SRC3_IOST_POISON_OFF 19 113 #define ENT_INT_SRC3_ITCT_POISON_OFF 20 114 #define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF 21 115 #define ENT_INT_SRC_MSK1 0x1c4 116 #define ENT_INT_SRC_MSK2 0x1c8 117 #define ENT_INT_SRC_MSK3 0x1cc 118 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 119 #define CHNL_PHYUPDOWN_INT_MSK 0x1d0 120 #define CHNL_ENT_INT_MSK 0x1d4 121 #define HGC_COM_INT_MSK 0x1d8 122 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) 123 #define SAS_ECC_INTR 0x1e8 124 #define SAS_ECC_INTR_DQE_ECC_1B_OFF 0 125 #define SAS_ECC_INTR_DQE_ECC_MB_OFF 1 126 #define SAS_ECC_INTR_IOST_ECC_1B_OFF 2 127 #define SAS_ECC_INTR_IOST_ECC_MB_OFF 3 128 #define SAS_ECC_INTR_ITCT_ECC_1B_OFF 4 129 #define SAS_ECC_INTR_ITCT_ECC_MB_OFF 5 130 #define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 6 131 #define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 7 132 #define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 8 133 #define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 9 134 #define SAS_ECC_INTR_CQE_ECC_1B_OFF 10 135 #define SAS_ECC_INTR_CQE_ECC_MB_OFF 11 136 #define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 12 137 #define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 13 138 #define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 14 139 #define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 15 140 #define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 16 141 #define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 17 142 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 18 143 #define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 19 144 #define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF 20 145 #define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF 21 146 #define SAS_ECC_INTR_MSK 0x1ec 147 #define HGC_ERR_STAT_EN 0x238 148 #define CQE_SEND_CNT 0x248 149 #define DLVRY_Q_0_BASE_ADDR_LO 0x260 150 
/*
 * Delivery/completion queue register banks: queue N's registers live at
 * the queue-0 offset plus N * 0x14 (see init_reg_v3_hw()).
 */
#define DLVRY_Q_0_BASE_ADDR_HI		0x264
#define DLVRY_Q_0_DEPTH			0x268
#define DLVRY_Q_0_WR_PTR		0x26c
#define DLVRY_Q_0_RD_PTR		0x270
#define HYPER_STREAM_ID_EN_CFG		0xc80
#define OQ0_INT_SRC_MSK			0xc90
#define COMPL_Q_0_BASE_ADDR_LO		0x4e0
#define COMPL_Q_0_BASE_ADDR_HI		0x4e4
#define COMPL_Q_0_DEPTH			0x4e8
#define COMPL_Q_0_WR_PTR		0x4ec
#define COMPL_Q_0_RD_PTR		0x4f0
#define HGC_RXM_DFX_STATUS14		0xae8
#define HGC_RXM_DFX_STATUS14_MEM0_OFF		0
#define HGC_RXM_DFX_STATUS14_MEM0_MSK		(0x1ff << \
						 HGC_RXM_DFX_STATUS14_MEM0_OFF)
#define HGC_RXM_DFX_STATUS14_MEM1_OFF		9
#define HGC_RXM_DFX_STATUS14_MEM1_MSK		(0x1ff << \
						 HGC_RXM_DFX_STATUS14_MEM1_OFF)
#define HGC_RXM_DFX_STATUS14_MEM2_OFF		18
#define HGC_RXM_DFX_STATUS14_MEM2_MSK		(0x1ff << \
						 HGC_RXM_DFX_STATUS14_MEM2_OFF)
#define HGC_RXM_DFX_STATUS15		0xaec
#define HGC_RXM_DFX_STATUS15_MEM3_OFF		0
#define HGC_RXM_DFX_STATUS15_MEM3_MSK		(0x1ff << \
						 HGC_RXM_DFX_STATUS15_MEM3_OFF)
#define AWQOS_AWCACHE_CFG		0xc84
#define ARQOS_ARCACHE_CFG		0xc88
#define HILINK_ERR_DFX			0xe04
#define SAS_GPIO_CFG_0			0x1000
#define SAS_GPIO_CFG_1			0x1004
#define SAS_GPIO_TX_0_1			0x1040
#define SAS_CFG_DRIVE_VLD		0x1070

/* phy registers requiring init */
/*
 * Per-phy registers: offsets are relative to the controller base; each phy
 * adds a further 0x400 * phy_no (see hisi_sas_phy_read32/write32()).
 */
#define PORT_BASE			(0x2000)
#define PHY_CFG				(PORT_BASE + 0x0)
#define HARD_PHY_LINKRATE		(PORT_BASE + 0x4)
#define PHY_CFG_ENA_OFF			0
#define PHY_CFG_ENA_MSK			(0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF		2
#define PHY_CFG_DC_OPT_MSK		(0x1 << PHY_CFG_DC_OPT_OFF)
#define PHY_CFG_PHY_RST_OFF		3
#define PHY_CFG_PHY_RST_MSK		(0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE		(PORT_BASE + 0x8)
#define CFG_PROG_PHY_LINK_RATE_OFF	0
#define CFG_PROG_PHY_LINK_RATE_MSK	(0xff << CFG_PROG_PHY_LINK_RATE_OFF)
#define CFG_PROG_OOB_PHY_LINK_RATE_OFF	8
#define CFG_PROG_OOB_PHY_LINK_RATE_MSK	(0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF)
#define PHY_CTRL			(PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF		0
#define PHY_CTRL_RESET_MSK		(0x1 << PHY_CTRL_RESET_OFF)
#define CMD_HDR_PIR_OFF			8
#define CMD_HDR_PIR_MSK			(0x1 << CMD_HDR_PIR_OFF)
#define SERDES_CFG			(PORT_BASE + 0x1c)
#define CFG_ALOS_CHK_DISABLE_OFF	9
#define CFG_ALOS_CHK_DISABLE_MSK	(0x1 << CFG_ALOS_CHK_DISABLE_OFF)
/* loopback BIST control/status, used by the debugfs BIST support */
#define SAS_PHY_BIST_CTRL		(PORT_BASE + 0x2c)
#define CFG_BIST_MODE_SEL_OFF		0
#define CFG_BIST_MODE_SEL_MSK		(0xf << CFG_BIST_MODE_SEL_OFF)
#define CFG_LOOP_TEST_MODE_OFF		14
#define CFG_LOOP_TEST_MODE_MSK		(0x3 << CFG_LOOP_TEST_MODE_OFF)
#define CFG_RX_BIST_EN_OFF		16
#define CFG_RX_BIST_EN_MSK		(0x1 << CFG_RX_BIST_EN_OFF)
#define CFG_TX_BIST_EN_OFF		17
#define CFG_TX_BIST_EN_MSK		(0x1 << CFG_TX_BIST_EN_OFF)
#define CFG_BIST_TEST_OFF		18
#define CFG_BIST_TEST_MSK		(0x1 << CFG_BIST_TEST_OFF)
#define SAS_PHY_BIST_CODE		(PORT_BASE + 0x30)
#define SAS_PHY_BIST_CODE1		(PORT_BASE + 0x34)
#define SAS_BIST_ERR_CNT		(PORT_BASE + 0x38)
#define SL_CFG				(PORT_BASE + 0x84)
#define AIP_LIMIT			(PORT_BASE + 0x90)
#define SL_CONTROL			(PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF	0
#define SL_CONTROL_NOTIFY_EN_MSK	(0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CTA_OFF			17
#define SL_CTA_MSK			(0x1 << SL_CTA_OFF)
#define RX_PRIMS_STATUS			(PORT_BASE + 0x98)
#define RX_BCAST_CHG_OFF		1
#define RX_BCAST_CHG_MSK		(0x1 << RX_BCAST_CHG_OFF)
/* outgoing IDENTIFY frame buffer; see config_id_frame_v3_hw() */
#define TX_ID_DWORD0			(PORT_BASE + 0x9c)
#define TX_ID_DWORD1			(PORT_BASE + 0xa0)
#define TX_ID_DWORD2			(PORT_BASE + 0xa4)
#define TX_ID_DWORD3			(PORT_BASE + 0xa8)
#define TX_ID_DWORD4			(PORT_BASE + 0xaC)
#define TX_ID_DWORD5			(PORT_BASE + 0xb0)
#define TX_ID_DWORD6			(PORT_BASE + 0xb4)
#define TXID_AUTO			(PORT_BASE + 0xb8)
#define CT3_OFF				1
#define CT3_MSK				(0x1 << CT3_OFF)
#define TX_HARDRST_OFF			2
#define TX_HARDRST_MSK			(0x1 << TX_HARDRST_OFF)
#define RX_IDAF_DWORD0			(PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H		(PORT_BASE + 0xfc)
#define STP_LINK_TIMER			(PORT_BASE + 0x120)
#define STP_LINK_TIMEOUT_STATE		(PORT_BASE + 0x124)
#define CON_CFG_DRIVER			(PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG		(PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG		(PORT_BASE + 0x138)
#define SAS_STP_CON_TIMER_CFG		(PORT_BASE + 0x13c)
/* per-phy channel interrupt status registers 0..2 and their bit layouts */
#define CHL_INT0			(PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF	0
#define CHL_INT0_HOTPLUG_TOUT_MSK	(0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
#define CHL_INT0_SL_RX_BCST_ACK_OFF	1
#define CHL_INT0_SL_RX_BCST_ACK_MSK	(0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
#define CHL_INT0_SL_PHY_ENABLE_OFF	2
#define CHL_INT0_SL_PHY_ENABLE_MSK	(0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
#define CHL_INT0_NOT_RDY_OFF		4
#define CHL_INT0_NOT_RDY_MSK		(0x1 << CHL_INT0_NOT_RDY_OFF)
#define CHL_INT0_PHY_RDY_OFF		5
#define CHL_INT0_PHY_RDY_MSK		(0x1 << CHL_INT0_PHY_RDY_OFF)
#define CHL_INT1			(PORT_BASE + 0x1b8)
#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF	15
#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF	16
#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF	17
#define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF	18
#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF	19
#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF	20
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF	21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF	22
#define CHL_INT1_DMAC_TX_FIFO_ERR_OFF	23
#define CHL_INT1_DMAC_RX_FIFO_ERR_OFF	24
#define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF	26
#define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF	27
#define CHL_INT2			(PORT_BASE + 0x1bc)
#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF	0
#define CHL_INT2_RX_DISP_ERR_OFF	28
#define CHL_INT2_RX_CODE_ERR_OFF	29
#define CHL_INT2_RX_INVLD_DW_OFF	30
#define CHL_INT2_STP_LINK_TIMEOUT_OFF	31
#define CHL_INT0_MSK			(PORT_BASE + 0x1c0)
#define CHL_INT1_MSK			(PORT_BASE + 0x1c4)
#define CHL_INT2_MSK			(PORT_BASE + 0x1c8)
#define SAS_EC_INT_COAL_TIME		(PORT_BASE + 0x1cc)
#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) 285 #define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4) 286 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) 287 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) 288 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) 289 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) 290 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) 291 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) 292 #define DMA_TX_STATUS (PORT_BASE + 0x2d0) 293 #define DMA_TX_STATUS_BUSY_OFF 0 294 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) 295 #define DMA_RX_STATUS (PORT_BASE + 0x2e8) 296 #define DMA_RX_STATUS_BUSY_OFF 0 297 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) 298 299 #define COARSETUNE_TIME (PORT_BASE + 0x304) 300 #define TXDEEMPH_G1 (PORT_BASE + 0x350) 301 #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380) 302 #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384) 303 #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390) 304 #define ERR_CNT_CODE_ERR (PORT_BASE + 0x394) 305 #define ERR_CNT_DISP_ERR (PORT_BASE + 0x398) 306 307 #define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */ 308 #if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW) 309 #error Max ITCT exceeded 310 #endif 311 312 #define AXI_MASTER_CFG_BASE (0x5000) 313 #define AM_CTRL_GLOBAL (0x0) 314 #define AM_CTRL_SHUTDOWN_REQ_OFF 0 315 #define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF) 316 #define AM_CURR_TRANS_RETURN (0x150) 317 318 #define AM_CFG_MAX_TRANS (0x5010) 319 #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) 320 #define AXI_CFG (0x5100) 321 #define AM_ROB_ECC_ERR_ADDR (0x510c) 322 #define AM_ROB_ECC_ERR_ADDR_OFF 0 323 #define AM_ROB_ECC_ERR_ADDR_MSK 0xffffffff 324 325 /* RAS registers need init */ 326 #define RAS_BASE (0x6000) 327 #define SAS_RAS_INTR0 (RAS_BASE) 328 #define SAS_RAS_INTR1 (RAS_BASE + 0x04) 329 #define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08) 330 #define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c) 331 #define CFG_SAS_RAS_INTR_MASK (RAS_BASE + 0x1c) 332 
#define SAS_RAS_INTR2			(RAS_BASE + 0x20)
#define SAS_RAS_INTR2_MASK		(RAS_BASE + 0x24)

/* HW dma structures */
/* Delivery queue header */
/* dw0 */
#define CMD_HDR_ABORT_FLAG_OFF		0
#define CMD_HDR_ABORT_FLAG_MSK		(0x3 << CMD_HDR_ABORT_FLAG_OFF)
#define CMD_HDR_ABORT_DEVICE_TYPE_OFF	2
#define CMD_HDR_ABORT_DEVICE_TYPE_MSK	(0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
#define CMD_HDR_RESP_REPORT_OFF		5
#define CMD_HDR_RESP_REPORT_MSK		(0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF		6
#define CMD_HDR_TLR_CTRL_MSK		(0x3 << CMD_HDR_TLR_CTRL_OFF)
#define CMD_HDR_PORT_OFF		18
#define CMD_HDR_PORT_MSK		(0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF		27
#define CMD_HDR_PRIORITY_MSK		(0x1 << CMD_HDR_PRIORITY_OFF)
#define CMD_HDR_CMD_OFF			29
#define CMD_HDR_CMD_MSK			(0x7 << CMD_HDR_CMD_OFF)
/* dw1 */
#define CMD_HDR_UNCON_CMD_OFF		3
#define CMD_HDR_DIR_OFF			5
#define CMD_HDR_DIR_MSK			(0x3 << CMD_HDR_DIR_OFF)
#define CMD_HDR_RESET_OFF		7
#define CMD_HDR_RESET_MSK		(0x1 << CMD_HDR_RESET_OFF)
#define CMD_HDR_VDTL_OFF		10
#define CMD_HDR_VDTL_MSK		(0x1 << CMD_HDR_VDTL_OFF)
#define CMD_HDR_FRAME_TYPE_OFF		11
#define CMD_HDR_FRAME_TYPE_MSK		(0x1f << CMD_HDR_FRAME_TYPE_OFF)
#define CMD_HDR_DEV_ID_OFF		16
#define CMD_HDR_DEV_ID_MSK		(0xffff << CMD_HDR_DEV_ID_OFF)
/* dw2 */
#define CMD_HDR_CFL_OFF			0
#define CMD_HDR_CFL_MSK			(0x1ff << CMD_HDR_CFL_OFF)
#define CMD_HDR_NCQ_TAG_OFF		10
#define CMD_HDR_NCQ_TAG_MSK		(0x1f << CMD_HDR_NCQ_TAG_OFF)
#define CMD_HDR_MRFL_OFF		15
#define CMD_HDR_MRFL_MSK		(0x1ff << CMD_HDR_MRFL_OFF)
#define CMD_HDR_SG_MOD_OFF		24
#define CMD_HDR_SG_MOD_MSK		(0x3 << CMD_HDR_SG_MOD_OFF)
/* dw3 */
#define CMD_HDR_IPTT_OFF		0
#define CMD_HDR_IPTT_MSK		(0xffff << CMD_HDR_IPTT_OFF)
/* dw6 */
#define CMD_HDR_DIF_SGL_LEN_OFF		0
#define CMD_HDR_DIF_SGL_LEN_MSK		(0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
#define CMD_HDR_DATA_SGL_LEN_OFF	16
#define CMD_HDR_DATA_SGL_LEN_MSK	(0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
/* dw7 */
#define CMD_HDR_ADDR_MODE_SEL_OFF	15
#define CMD_HDR_ADDR_MODE_SEL_MSK	(1 << CMD_HDR_ADDR_MODE_SEL_OFF)
#define CMD_HDR_ABORT_IPTT_OFF		16
#define CMD_HDR_ABORT_IPTT_MSK		(0xffff << CMD_HDR_ABORT_IPTT_OFF)

/* Completion header */
/* dw0 */
#define CMPLT_HDR_CMPLT_OFF		0
#define CMPLT_HDR_CMPLT_MSK		(0x3 << CMPLT_HDR_CMPLT_OFF)
#define CMPLT_HDR_ERROR_PHASE_OFF	2
#define CMPLT_HDR_ERROR_PHASE_MSK	(0xff << CMPLT_HDR_ERROR_PHASE_OFF)
#define CMPLT_HDR_RSPNS_XFRD_OFF	10
#define CMPLT_HDR_RSPNS_XFRD_MSK	(0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
#define CMPLT_HDR_ERX_OFF		12
#define CMPLT_HDR_ERX_MSK		(0x1 << CMPLT_HDR_ERX_OFF)
#define CMPLT_HDR_ABORT_STAT_OFF	13
#define CMPLT_HDR_ABORT_STAT_MSK	(0x7 << CMPLT_HDR_ABORT_STAT_OFF)
/* abort_stat */
#define STAT_IO_NOT_VALID		0x1
#define STAT_IO_NO_DEVICE		0x2
#define STAT_IO_COMPLETE		0x3
#define STAT_IO_ABORTED			0x4
/* dw1 */
#define CMPLT_HDR_IPTT_OFF		0
#define CMPLT_HDR_IPTT_MSK		(0xffff << CMPLT_HDR_IPTT_OFF)
#define CMPLT_HDR_DEV_ID_OFF		16
#define CMPLT_HDR_DEV_ID_MSK		(0xffff << CMPLT_HDR_DEV_ID_OFF)
/* dw3 */
#define CMPLT_HDR_IO_IN_TARGET_OFF	17
#define CMPLT_HDR_IO_IN_TARGET_MSK	(0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)

/* ITCT header */
/* qw0 */
#define ITCT_HDR_DEV_TYPE_OFF		0
#define ITCT_HDR_DEV_TYPE_MSK		(0x3 << ITCT_HDR_DEV_TYPE_OFF)
#define ITCT_HDR_VALID_OFF		2
#define ITCT_HDR_VALID_MSK		(0x1 << ITCT_HDR_VALID_OFF)
#define ITCT_HDR_MCR_OFF		5
#define ITCT_HDR_MCR_MSK		(0xf << ITCT_HDR_MCR_OFF)
#define ITCT_HDR_VLN_OFF		9
#define ITCT_HDR_VLN_MSK		(0xf << ITCT_HDR_VLN_OFF)
#define ITCT_HDR_SMP_TIMEOUT_OFF	16
#define ITCT_HDR_AWT_CONTINUE_OFF	25
#define ITCT_HDR_PORT_ID_OFF		28
#define ITCT_HDR_PORT_ID_MSK		(0xf << ITCT_HDR_PORT_ID_OFF)
/* qw2 */
#define ITCT_HDR_INLT_OFF		0
#define ITCT_HDR_INLT_MSK		(0xffffULL << ITCT_HDR_INLT_OFF)
#define ITCT_HDR_RTOLT_OFF		48
#define ITCT_HDR_RTOLT_MSK		(0xffffULL << ITCT_HDR_RTOLT_OFF)

/*
 * T10 protection information unit carried in the command; field layout
 * matches the T10_* bit definitions above.
 */
struct hisi_sas_protect_iu_v3_hw {
	u32 dw0;
	u32 lbrtcv;
	u32 lbrtgv;
	u32 dw3;
	u32 dw4;
	u32 dw5;
	u32 rsv;
};

/* One completion queue entry, decoded via the CMPLT_HDR_* masks above. */
struct hisi_sas_complete_v3_hdr {
	__le32 dw0;
	__le32 dw1;
	__le32 act;
	__le32 dw3;
};

/* Error record the hardware DMAs out when a command fails. */
struct hisi_sas_err_record_v3 {
	/* dw0 */
	__le32 trans_tx_fail_type;

	/* dw1 */
	__le32 trans_rx_fail_type;

	/* dw2 */
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;

	/* dw3 */
	__le32 dma_rx_err_type;
};

#define RX_DATA_LEN_UNDERFLOW_OFF	6
#define RX_DATA_LEN_UNDERFLOW_MSK	(1 << RX_DATA_LEN_UNDERFLOW_OFF)

#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
#define HISI_SAS_MSI_COUNT_V3_HW 32

/* values of the DIR field in delivery queue header dw1 */
#define DIR_NO_DATA 0
#define DIR_TO_INI 1
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3

/*
 * ATA commands that need the "unconstrained" flag in the command header:
 * READ LOG (DMA) EXT, or DEVICE RESET with SRST set in the control field.
 * NOTE(review): function-like macro evaluates 'fis' members more than once;
 * callers should pass a plain struct, not an expression with side effects.
 */
#define FIS_CMD_IS_UNCONSTRAINED(fis) \
	((fis.command == ATA_CMD_READ_LOG_EXT) || \
	(fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
	((fis.command == ATA_CMD_DEV_RESET) && \
	((fis.control & ATA_SRST) != 0)))

/* T10 PI (DIF) control bits */
#define T10_INSRT_EN_OFF	0
#define T10_INSRT_EN_MSK	(1 << T10_INSRT_EN_OFF)
#define T10_RMV_EN_OFF		1
#define T10_RMV_EN_MSK		(1 << T10_RMV_EN_OFF)
#define T10_RPLC_EN_OFF		2
#define T10_RPLC_EN_MSK		(1 << T10_RPLC_EN_OFF)
#define T10_CHK_EN_OFF		3
#define T10_CHK_EN_MSK		(1 << T10_CHK_EN_OFF)
#define INCR_LBRT_OFF		5
#define INCR_LBRT_MSK		(1 << INCR_LBRT_OFF)
#define USR_DATA_BLOCK_SZ_OFF	20
#define USR_DATA_BLOCK_SZ_MSK	(0x3 << USR_DATA_BLOCK_SZ_OFF)
#define T10_CHK_MSK_OFF		16
#define T10_CHK_REF_TAG_MSK	(0xf0 << T10_CHK_MSK_OFF)
#define T10_CHK_APP_TAG_MSK	(0xc << T10_CHK_MSK_OFF)

#define BASE_VECTORS_V3_HW	16
#define MIN_AFFINE_VECTORS_V3_HW	(BASE_VECTORS_V3_HW + 1)

#define CHNL_INT_STS_MSK	0xeeeeeeee
#define CHNL_INT_STS_PHY_MSK	0xe
#define CHNL_INT_STS_INT0_MSK	BIT(1)
#define CHNL_INT_STS_INT1_MSK	BIT(2)
#define CHNL_INT_STS_INT2_MSK	BIT(3)
#define CHNL_WIDTH		4

/* ACPI _DSM function indices; see hw_init_v3_hw() */
enum {
	DSM_FUNC_ERR_HANDLE_MSI = 0,
};

/*
 * NOTE(review): no module_param()/module_param_named() registration for
 * intr_conv is visible in this chunk — presumably it is registered
 * elsewhere, or the flag is effectively compile-time only; confirm.
 */
static bool hisi_sas_intr_conv;
MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");

/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");

static bool auto_affine_msi_experimental;
module_param(auto_affine_msi_experimental, bool, 0444);
MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs as experimental:\n"
		 "default is off");

/* Read a 32-bit global register at byte offset @off from the MMIO base. */
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
	void __iomem *regs = hisi_hba->regs + off;

	return readl(regs);
}

/* Write @val to the 32-bit global register at byte offset @off. */
static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + off;

	writel(val, regs);
}

/*
 * Write @val to a per-phy register: each phy owns a 0x400-byte register
 * window, and the PORT_BASE offset is already folded into the per-phy
 * register #defines above.
 */
static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
				 u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	writel(val, regs);
}

/* Per-phy counterpart of hisi_sas_read32(); see hisi_sas_phy_write32(). */
static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
			       int phy_no, u32 off)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	return readl(regs);
}

/*
 * Poll global register @off until @cond holds or @timeout_us elapses.
 * Relies on a 'hisi_hba' variable in the calling scope; thin wrappers
 * around readl_poll_timeout()/readl_poll_timeout_atomic().
 */
#define hisi_sas_read32_poll_timeout(off, val, cond, delay_us,		\
				     timeout_us)			\
({									\
	void __iomem *regs = hisi_hba->regs + off;			\
	readl_poll_timeout(regs, val, cond, delay_us, timeout_us);	\
})

#define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us,	\
					    timeout_us)			\
({									\
	void __iomem *regs = hisi_hba->regs + off;			\
	readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\
})

/*
 * Program the global, per-phy and per-queue registers with their initial
 * values after reset: interrupt sources/masks, queue and table DMA base
 * addresses, and LED/GPIO setup.  The bare numeric values written below
 * are hardware tuning/default values from the v3 programming information
 * and are not decoded further here.
 */
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
	int i, j;

	/* Global registers init */
	/* enable one delivery queue per bit, for queue_count queues */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	/* interrupt convergence is controlled by the module-level flag */
	hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN,
			 hisi_sas_intr_conv);
	/* write-1-to-clear any stale interrupt sources, then set masks */
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
	/* unmask the per-queue completion interrupts */
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);

	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		enum sas_linkrate max;
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, i,
							     PROG_PHY_LINK_RATE);

		/*
		 * Program the max negotiable link rate: default to 12G
		 * unless sysfs has set a valid maximum for this phy.
		 */
		prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
		if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
				      SAS_LINK_RATE_1_5_GBPS))
			max = SAS_LINK_RATE_12_0_GBPS;
		else
			max = sas_phy->phy->maximum_linkrate;
		prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
				     prog_phy_link_rate);
		hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
		hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
		/* clear stale channel interrupts, then set masks */
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
		hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
		hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
				     0x30f4240);
		/* used for 12G negotiate */
		hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
		hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);

		/* get default FFE configuration for BIST */
		for (j = 0; j < FFE_CFG_MAX; j++) {
			u32 val = hisi_sas_phy_read32(hisi_hba, i,
						      TXDEEMPH_G1 + (j * 0x4));
			hisi_hba->debugfs_bist_ffe[i][j] = val;
		}
	}

	/*
	 * Program the DMA base address and depth of every delivery and
	 * completion queue; queue N's registers are at queue 0's offset
	 * plus N * 0x14.
	 */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));

	/* RAS registers init: unmask all RAS interrupt sources */
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
	hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);

	/* LED registers init */
	hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff);
	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080);
	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080);
	/* Configure blink generator rate A to 1Hz and B to 4Hz */
	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700);
	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000);
}

/* Set the DC_OPT field (bit 2) of PHY_CFG to 1 for @phy_no. */
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg &= ~PHY_CFG_DC_OPT_MSK;
	cfg |= 1 << PHY_CFG_DC_OPT_OFF;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

/*
 * Build the SAS IDENTIFY address frame for @phy_no (end device, all
 * initiator protocols, the HBA's SAS address) and program it into the
 * TX_ID_DWORD0..5 registers, byte-swapping each 32-bit chunk with
 * __swab32() on the way in.
 */
static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct sas_identify_frame identify_frame;
	u32 *identify_buffer;

	memset(&identify_frame, 0, sizeof(identify_frame));
	identify_frame.dev_type = SAS_END_DEVICE;
	identify_frame.frame_type = 0;
	identify_frame._un1 = 1;
	identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
	identify_frame.target_bits = SAS_PROTOCOL_NONE;
	memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	identify_frame.phy_id = phy_no;
	identify_buffer = (u32 *)(&identify_frame);

	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
			     __swab32(identify_buffer[0]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
			     __swab32(identify_buffer[1]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
			     __swab32(identify_buffer[2]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
			     __swab32(identify_buffer[3]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
			     __swab32(identify_buffer[4]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
			     __swab32(identify_buffer[5]));
}

/*
 * Fill in the in-memory ITCT (device context table) entry for @sas_dev:
 * device type, link rate, port id, timeouts and the device's SAS address.
 * The table itself is DMAed by the hardware (base programmed in
 * init_reg_v3_hw()), so no register write is needed here.
 */
static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_device *sas_dev)
{
	struct domain_device *device = sas_dev->sas_device;
	struct device *dev = hisi_hba->dev;
	u64 qw0, device_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
	struct domain_device *parent_dev = device->parent;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u64 sas_addr;

	memset(itct, 0, sizeof(*itct));

	/* qw0 */
	qw0 = 0;
	switch (sas_dev->dev_type) {
	case SAS_END_DEVICE:
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PENDING:
		/* SATA behind an expander is STP; direct-attached is SATA */
		if (parent_dev && dev_is_expander(parent_dev->dev_type))
			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
		else
			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
		break;
	default:
		/* warn only; common flags below are still set on qw0 == 0 */
		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
			 sas_dev->dev_type);
	}

	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
		(device->linkrate << ITCT_HDR_MCR_OFF) |
		(1 << ITCT_HDR_VLN_OFF) |
		(0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
		(1 << ITCT_HDR_AWT_CONTINUE_OFF) |
		(port->id << ITCT_HDR_PORT_ID_OFF));
	itct->qw0 = cpu_to_le64(qw0);

	/* qw1 */
	memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
	itct->sas_addr = cpu_to_le64(__swab64(sas_addr));

	/* qw2 */
	if (!dev_is_sata(device))
		itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
					(0x1ULL << ITCT_HDR_RTOLT_OFF));
}

/*
 * Ask the hardware to invalidate the ITCT entry for @sas_dev and wait for
 * the ITC interrupt to signal completion (sas_dev->completion is completed
 * by the interrupt handler, which is not visible in this chunk).
 *
 * Return: 0 on success, -ETIMEDOUT if the hardware never acknowledged.
 */
static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
			    struct hisi_sas_device *sas_dev)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	u64 dev_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	struct device *dev = hisi_hba->dev;

	sas_dev->completion = &completion;

	/* clear the itct interrupt state */
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

	/* clear the itct table */
	reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
	hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

	if (!wait_for_completion_timeout(sas_dev->completion,
					 CLEAR_ITCT_TIMEOUT * HZ)) {
		dev_warn(dev, "failed to clear ITCT\n");
		return -ETIMEDOUT;
	}

	/* hardware entry is gone; wipe the in-memory copy too */
	memset(itct, 0, sizeof(struct hisi_sas_itct));
	return 0;
}

/*
 * Mark every outstanding slot (IPTT) of @device as aborted via the
 * CFG_ABT_SET_QUERY_IPTT interface, then latch the operation by pulsing
 * CFG_ABT_SET_IPTT_DONE.
 */
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
			       struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	u32 cfg_abt_set_query_iptt;

	cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
						 CFG_ABT_SET_QUERY_IPTT);
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
		cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
		cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
			(slot->idx << CFG_SET_ABORTED_IPTT_OFF);
		hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
				 cfg_abt_set_query_iptt);
	}
	/* drop the "set aborted" enable bit again when done */
	cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
			 cfg_abt_set_query_iptt);
	hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
			 1 << CFG_ABT_SET_IPTT_DONE_OFF);
}

/*
 * Quiesce and reset the controller: disable delivery queues and phys,
 * wait for the AXI bus to go idle, then invoke the ACPI _RST method.
 *
 * Return: 0 on success, -EIO if the bus never idles or _RST fails,
 * -EINVAL if the device has no ACPI handle (no reset method).
 */
static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int ret;
	u32 val;

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	/* Disable all of the PHYs */
	hisi_sas_stop_phys(hisi_hba);
	udelay(50);

	/* Ensure axi bus idle */
	ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
					   20000, 1000000);
	if (ret) {
		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
		return -EIO;
	}

	if (ACPI_HANDLE(dev)) {
		acpi_status s;

		s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
		if (ACPI_FAILURE(s)) {
			dev_err(dev, "Reset failed\n");
			return -EIO;
		}
	} else {
		dev_err(dev, "no reset method!\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Bring the controller up: reset the hardware, program the initial
 * register values, then evaluate the firmware _DSM (GUID below) to switch
 * error handling to MSI, and warn if the device is not ACPI
 * power-manageable.
 *
 * Return: 0 on success, negative errno on reset/GUID failure.
 */
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct acpi_device *acpi_dev;
	union acpi_object *obj;
	guid_t guid;
	int rc;

	rc = reset_hw_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
		return rc;
	}

	msleep(100);
	init_reg_v3_hw(hisi_hba);

	if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
		dev_err(dev, "Parse GUID failed\n");
		return -EINVAL;
	}

	/*
	 * This DSM handles some hardware-related configurations:
	 * 1. Switch over to MSI error handling in kernel
	 * 2. BIOS *may* reset some register values through this method
	 */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
				DSM_FUNC_ERR_HANDLE_MSI, NULL);
	if (!obj)
		dev_warn(dev, "can not find DSM method, ignore\n");
	else
		ACPI_FREE(obj);

	acpi_dev = ACPI_COMPANION(dev);
	if (!acpi_device_power_manageable(acpi_dev))
		dev_notice(dev, "neither _PS0 nor _PR0 is defined\n");
	return 0;
}

/* Set the enable bit and release the reset bit in PHY_CFG for @phy_no. */
static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg |= PHY_CFG_ENA_MSK;
	cfg &= ~PHY_CFG_PHY_RST_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

/*
 * Disable @phy_no: temporarily mask the RX error interrupts, clear the
 * enable bit, and if the phy still reports ready after 50ms force a phy
 * reset.  The error counters are then read (presumably read-to-clear —
 * confirm against the HW manual), the masked interrupts acknowledged,
 * and the original interrupt mask restored.
 */
static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
			       BIT(CHL_INT2_RX_CODE_ERR_OFF) |
			       BIT(CHL_INT2_RX_INVLD_DW_OFF);
	u32 state;

	/* mask RX disparity/code/invalid-dword interrupts while disabling */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk);

	cfg &= ~PHY_CFG_ENA_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);

	mdelay(50);

	/* if the phy is still up, force it down with a phy reset */
	state = hisi_sas_read32(hisi_hba, PHY_STATE);
	if (state & BIT(phy_no)) {
		cfg |= PHY_CFG_PHY_RST_MSK;
		hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
	}

	udelay(1);

	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);

	/* acknowledge the masked interrupts and restore the original mask */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk);
}

/* Configure the IDENTIFY frame and optional mode, then enable the phy. */
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}
/*
 * Hard-reset one PHY: disable it, request a SATA hard reset via TXID_AUTO
 * for directly attached end devices, wait, then re-enable.
 */
static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 txid_auto;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	if (phy->identify.device_type == SAS_END_DEVICE) {
		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
					txid_auto | TX_HARDRST_MSK);
	}
	msleep(100);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}

/* v3 hardware negotiates up to 12G. */
static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
{
	return SAS_LINK_RATE_12_0_GBPS;
}

/* Enable every PHY that has not been administratively disabled via sysfs. */
static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (!sas_phy->phy->enabled)
			continue;

		hisi_sas_phy_enable(hisi_hba, i, 1);
	}
}

/* Pulse the SL NOTIFY (ENABLE SPINUP) primitive on one PHY. */
static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 sl_control;

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
	msleep(1);
	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}

/*
 * Return a bitmap of the ready PHYs whose 4-bit port-number field in
 * PHY_PORT_NUM_MA matches @port_id (i.e. the members of that wide port).
 */
static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
{
	int i, bitmap = 0;
	u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	for (i = 0; i < hisi_hba->n_phy; i++)
		if (phy_state & BIT(i))
			if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
				bitmap |= BIT(i);

	return bitmap;
}

/*
 * Hand the longest ready prefix of the delivery-queue list to the hardware
 * by advancing the queue's write pointer past the last ready slot.
 */
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct hisi_sas_slot *s, *s1, *s2 = NULL;
	int dlvry_queue = dq->id;
	int wp;

	/* stop at the first slot that is not yet fully built */
	list_for_each_entry_safe(s, s1, &dq->list, delivery) {
		if (!s->ready)
			break;
		s2 = s;
		list_del(&s->delivery);
	}

	if (!s2)
		return;

	/*
	 * Ensure that memories for slots built on other CPUs is observed.
	 */
	smp_rmb();
	wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;

	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}

/* Fill the slot's data SGE page from @scatter and record it in the hdr. */
static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
			       struct hisi_sas_slot *slot,
			       struct hisi_sas_cmd_hdr *hdr,
			       struct scatterlist *scatter,
			       int n_elem)
{
	struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
	struct scatterlist *sg;
	int i;

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));

	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
}

/* Same as prep_prd_sge_v3_hw() but for the DIF (protection) scatterlist. */
static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot,
				   struct hisi_sas_cmd_hdr *hdr,
				   struct scatterlist *scatter,
				   int n_elem)
{
	struct hisi_sas_sge_dif_page *sge_dif_page;
	struct scatterlist *sg;
	int i;

	sge_dif_page = hisi_sas_sge_dif_addr_mem(slot);

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_dif_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = 0;
		entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->dif_prd_table_addr =
		cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot));

	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF);
}

/*
 * Map the command's T10-PI check flags onto the hardware's tag-check mask.
 * NOTE(review): the branch looks inverted relative to the flag name
 * (REF_CHECK set -> only the app-tag mask) — this mirrors the hardware's
 * mask convention here; confirm against the v3 hw programming guide
 * before changing.
 */
static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
{
	unsigned char prot_flags = scsi_cmnd->prot_flags;

	if (prot_flags & SCSI_PROT_REF_CHECK)
		return T10_CHK_APP_TAG_MSK;
	return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK;
}

/*
 * Build the protection information IU for a DIF/DIX command: select
 * insert/strip/check behaviour from the protection op, seed the logical
 * block reference tag, and encode the protection interval (sector size).
 */
static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
			    struct hisi_sas_protect_iu_v3_hw *prot)
{
	unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
	unsigned int interval = scsi_prot_interval(scsi_cmnd);
	u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);

	switch (prot_op) {
	case SCSI_PROT_READ_INSERT:
		prot->dw0 |= T10_INSRT_EN_MSK;
		prot->lbrtgv = lbrt_chk_val;	/* generate from this tag */
		break;
	case SCSI_PROT_READ_STRIP:
		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
		prot->lbrtcv = lbrt_chk_val;	/* check against this tag */
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	case SCSI_PROT_READ_PASS:
		prot->dw0 |= T10_CHK_EN_MSK;
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	case SCSI_PROT_WRITE_INSERT:
		prot->dw0 |= T10_INSRT_EN_MSK;
		prot->lbrtgv = lbrt_chk_val;
		break;
	case SCSI_PROT_WRITE_STRIP:
		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
		prot->lbrtcv = lbrt_chk_val;
		break;
	case SCSI_PROT_WRITE_PASS:
		prot->dw0 |= T10_CHK_EN_MSK;
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	default:
		WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
		break;
	}

	/* encode the protection interval: 512 is the hardware default */
	switch (interval) {
	case 512:
		break;
	case 4096:
		prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
		break;
	case 520:
		prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
		break;
	default:
		WARN(1, "protection interval (0x%x) invalid\n",
		     interval);
		break;
	}

	prot->dw0 |= INCR_LBRT_MSK;
}

/*
 * Build the command header and SSP frame for an SSP command or task
 * management function in @slot's command table.
 */
static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	struct hisi_sas_tmf_task *tmf = slot->tmf;
	int has_data = 0, priority = !!tmf;	/* TMFs go out high priority */
	unsigned char prot_op;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0, len = 0;

	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (tmf) {
		/* prot_op is only read below when has_data is set */
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		prot_op = scsi_get_prot_op(scsi_cmnd);
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
	      + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
				   slot->n_elem);

		if (scsi_prot_sg_count(scsi_cmnd))
			prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr,
					       scsi_prot_sglist(scsi_cmnd),
					       slot->n_elem_dif);
	}

	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	/* the SSP IU sits right after the frame header in the cmd table */
	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
		sizeof(struct ssp_frame_hdr);

	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	if (!tmf) {
		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* tag of the task being managed, big endian */
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
		struct hisi_sas_protect_iu_v3_hw prot;
		u8 *buf_cmd_prot;

		hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
		dw1 |= CMD_HDR_PIR_MSK;
		buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
			       sizeof(struct ssp_frame_hdr) +
			       sizeof(struct ssp_command_iu);

		memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
		fill_prot_v3_hw(scsi_cmnd, &prot);
		memcpy(buf_cmd_prot, &prot,
		       sizeof(struct hisi_sas_protect_iu_v3_hw));
		/*
		 * For READ, we need length of info read to memory, while for
		 * WRITE we need length of data written to the disk.
		 */
		if (prot_op == SCSI_PROT_WRITE_INSERT ||
		    prot_op == SCSI_PROT_READ_INSERT ||
		    prot_op == SCSI_PROT_WRITE_PASS ||
		    prot_op == SCSI_PROT_READ_PASS) {
			unsigned int interval = scsi_prot_interval(scsi_cmnd);
			unsigned int ilog2_interval = ilog2(interval);

			/* 8 bytes of PI per protection interval */
			len = (task->total_xfer_len >> ilog2_interval) * 8;
		}
	}

	hdr->dw1 = cpu_to_le32(dw1);

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
}

/*
 * Build the command header for an SMP request; the request frame is sent
 * directly from the libsas-provided DMA-mapped buffer.
 */
static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len;

	/* req */
	sg_req = &task->smp_task.smp_req;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2 */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
}

/*
 * Build the command header and host-to-device FIS for a SATA/STP command.
 */
static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device = task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u8 *buf_cmd;
	int has_data = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	/* cmd 3 = STP (behind expander), cmd 4 = direct-attached SATA */
	if (parent_dev && dev_is_expander(parent_dev->dev_type))
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	else
		hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);

	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	/* a SRST device-reset FIS needs the reset bit in the header */
	if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
			(task->ata_task.fis.control & ATA_SRST))
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (hisi_sas_get_ata_protocol(
		&task->ata_task.fis, task->data_dir))
		<< CMD_HDR_FRAME_TYPE_OFF;
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
		dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;

	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2 */
	if (task->ata_task.use_ncq) {
		struct ata_queued_cmd *qc = task->uldd_task;

		/* NCQ tag also rides in bits 7:3 of the FIS sector count */
		hdr_tag = qc->tag;
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
			2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data)
		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
}

/*
 * Build an internal abort command header: abort either a single IPTT
 * (@tag_to_abort) or all commands of @device_id, per @abort_flag.
 */
static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_slot *slot,
			     int device_id, int abort_flag, int tag_to_abort)
{
	struct sas_task *task = slot->task;
	struct domain_device *dev = task->dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct hisi_sas_port *port = slot->port;

	/* dw0 */
	hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/
			       (port->id << CMD_HDR_PORT_OFF) |
				   (dev_is_sata(dev)
					<< CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
					(abort_flag
					 << CMD_HDR_ABORT_FLAG_OFF));

	/* dw1 */
	hdr->dw1 = cpu_to_le32(device_id
			<< CMD_HDR_DEV_ID_OFF);

	/* dw7 */
	hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
	hdr->transfer_tags = cpu_to_le32(slot->idx);
}

/*
 * PHY-up interrupt: read the negotiated port id and link rate, capture the
 * identification data (initial D2H FIS for SATA, IDENTIFY address frame
 * for SAS), and notify libsas. Completes any pending PHY reset waiter.
 */
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i;
	irqreturn_t res;
	u32 context, port_id, link_rate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;
	unsigned long flags;

	del_timer(&phy->timer);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);

	port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	port_id = (port_id >> (4 * phy_no)) & 0xf;
	link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
	link_rate = (link_rate >> (phy_no * 4)) & 0xf;

	/* 0xf means the PHY was not assigned a port */
	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}
	sas_phy->linkrate = link_rate;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);

	/* Check for SATA dev */
	context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
	if (context & (1 << phy_no)) {
		struct hisi_sas_initial_fis *initial_fis;
		struct dev_to_host_fis *fis;
		u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
		struct Scsi_Host *shost = hisi_hba->shost;

		dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
		initial_fis = &hisi_hba->initial_fis[phy_no];
		fis = &initial_fis->fis;

		/* check ERR bit of Status Register */
		if (fis->status & ATA_ERR) {
			dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
				 phy_no, fis->status);
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
			res = IRQ_NONE;
			goto end;
		}

		sas_phy->oob_mode = SATA_OOB_MODE;
		/* SATA devices have no SAS address; synthesize one */
		attached_sas_addr[0] = 0x50;
		attached_sas_addr[6] = shost->host_no;
		attached_sas_addr[7] = phy_no;
		memcpy(sas_phy->attached_sas_addr,
		       attached_sas_addr,
		       SAS_ADDR_SIZE);
		memcpy(sas_phy->frame_rcvd, fis,
		       sizeof(struct dev_to_host_fis));
		phy->phy_type |= PORT_TYPE_SATA;
		phy->identify.device_type = SAS_SATA_DEV;
		phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
		phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	} else {
		u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
		struct sas_identify_frame *id =
			(struct sas_identify_frame *)frame_rcvd;

		dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
		/* the IDENTIFY frame registers are byte-swapped */
		for (i = 0; i < 6; i++) {
			u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
					       RX_IDAF_DWORD0 + (i * 4));
			frame_rcvd[i] = __swab32(idaf);
		}
		sas_phy->oob_mode = SAS_OOB_MODE;
		memcpy(sas_phy->attached_sas_addr,
		       &id->sas_addr,
		       SAS_ADDR_SIZE);
		phy->phy_type |= PORT_TYPE_SAS;
		phy->identify.device_type = id->dev_type;
		phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
		if (phy->identify.device_type == SAS_END_DEVICE)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SSP;
		else if (phy->identify.device_type != SAS_PHY_UNUSED)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SMP;
	}

	phy->port_id = port_id;
	phy->phy_attached = 1;
	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
	res = IRQ_HANDLED;
	/* wake anyone waiting on a PHY reset to finish */
	spin_lock_irqsave(&phy->lock, flags);
	if (phy->reset_completion) {
		phy->in_reset = 0;
		complete(phy->reset_completion);
	}
	spin_unlock_irqrestore(&phy->lock, flags);
end:
	/* ack the interrupt and re-enable PHY-up reporting */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}

/*
 * PHY-down interrupt: report the loss to libsas, clear port config bits,
 * and ack/re-enable the not-ready interrupt.
 */
static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 phy_state, sl_ctrl, txid_auto;
	struct device *dev = hisi_hba->dev;

	atomic_inc(&phy->down_cnt);

	del_timer(&phy->timer);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ?
			  1 : 0);

	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
						sl_ctrl&(~SL_CTA_MSK));

	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
						txid_auto | CT3_MSK);

	/* ack the not-ready interrupt, then unmask it again */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return IRQ_HANDLED;
}

/*
 * Broadcast-primitive interrupt: forward a BROADCAST(CHANGE) to libsas
 * unless a controller reset is in progress.
 */
static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	u32 bcast_status;

	/* mask broadcast checking while we consume the status */
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
	bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
	if ((bcast_status & RX_BCAST_CHG_MSK) &&
	    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_RX_BCST_ACK_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);

	return IRQ_HANDLED;
}

/*
 * Top-level PHY interrupt: CHNL_INT_STATUS packs 4 status bits per PHY;
 * walk them and dispatch phy-up / broadcast / phy-down per PHY depending
 * on whether the PHY currently reports ready.
 */
static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_NONE;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0x11111111;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);
			u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
			int rdy = phy_state & (1 << phy_no);

			if (rdy) {
				if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
					/* phy up */
					if (phy_up_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
				if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
					/* phy bcast */
					if (phy_bcast_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
			} else {
				if (irq_value & CHL_INT0_NOT_RDY_MSK)
					/* phy down */
					if (phy_down_v3_hw(phy_no, hisi_hba)
							== IRQ_HANDLED)
						res = IRQ_HANDLED;
			}
		}
		irq_msk >>= 4;
		phy_no++;
	}

	return res;
}

/* Per-channel DMA/AXI error bits reported through CHL_INT1. */
static const struct hisi_sas_hw_error port_axi_error[] = {
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF),
		.msg = "dmac_tx_ecc_bad_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF),
		.msg = "dmac_rx_ecc_bad_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
		.msg = "dma_tx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
		.msg = "dma_tx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
		.msg = "dma_rx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
		.msg = "dma_rx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF),
		.msg = "dma_tx_fifo_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF),
		.msg = "dma_rx_fifo_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF),
		.msg = "dma_tx_axi_ruser_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF),
		.msg = "dma_rx_axi_ruser_err",
	},
};

/*
 * CHL_INT1 handler: log each unmasked DMA/AXI error for the channel and
 * schedule the controller reset work, then ack the handled bits.
 */
static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1);
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK);
	struct device *dev = hisi_hba->dev;
	int i;

	irq_value &= ~irq_msk;
	if (!irq_value)
		return;

	for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
		const struct hisi_sas_hw_error *error = &port_axi_error[i];

		if (!(irq_value & error->irq_msk))
			continue;

		dev_err(dev, "%s error (phy%d 0x%x) found!\n",
			error->msg, phy_no, irq_value);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
}

/*
 * Accumulate the hardware link-error counters for one PHY into the
 * transport-layer statistics. The phy lock serializes against other
 * readers of these counters.
 */
static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;
	u32 reg_value;

	spin_lock_irqsave(&phy->lock, flags);

	/* loss dword sync */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
	sphy->loss_of_dword_sync_count += reg_value;

	/* phy reset problem */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
	sphy->phy_reset_problem_count += reg_value;

	/* invalid dword */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	sphy->invalid_dword_count += reg_value;

	/* disparity err */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	sphy->running_disparity_error_count += reg_value;

	/* code violation error */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
	phy->code_violation_err_count += reg_value;

	spin_unlock_irqrestore(&phy->lock, flags);
}

/*
 * CHL_INT2 handler: identify-frame and STP link timeouts trigger a link
 * reset; RX errors are logged (revision > 0x20) or, on revision 0x20,
 * polled via HILINK_ERR_DFX with a link reset on failure.
 */
static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
	u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct pci_dev *pci_dev = hisi_hba->pci_dev;
	struct device *dev = hisi_hba->dev;
	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
			BIT(CHL_INT2_RX_CODE_ERR_OFF) |
			BIT(CHL_INT2_RX_INVLD_DW_OFF);

	irq_value &= ~irq_msk;
	if (!irq_value)
		return;

	if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
		dev_warn(dev, "phy%d identify timeout\n", phy_no);
		hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
		u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
				STP_LINK_TIMEOUT_STATE);

		dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
			 phy_no, reg_value);
		if (reg_value & BIT(4))
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	if (pci_dev->revision > 0x20 && (irq_value & msk)) {
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct sas_phy *sphy = sas_phy->phy;

		phy_get_events_v3_hw(hisi_hba, phy_no);

		if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF))
			dev_info(dev, "phy%d invalid dword cnt:   %u\n", phy_no,
				 sphy->invalid_dword_count);

		if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF))
			dev_info(dev, "phy%d code violation cnt:  %u\n", phy_no,
				 phy->code_violation_err_count);

		if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF))
			dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no,
				 sphy->running_disparity_error_count);
	}

	if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
	    (pci_dev->revision == 0x20)) {
		u32 reg_value;
		int rc;

		rc = hisi_sas_read32_poll_timeout_atomic(
				HILINK_ERR_DFX, reg_value,
				!((reg_value >> 8) & BIT(phy_no)),
				1000, 10000);
		if (rc)
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
}

/*
 * CHL_INT0 handler: report OOB-ready, then ack everything except the
 * bits owned by the phy up/down/bcast handlers.
 */
static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0);

	if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
		hisi_sas_phy_oob_ready(hisi_hba, phy_no);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     irq_value0 &
			     (~CHL_INT0_SL_RX_BCST_ACK_MSK)
			     & (~CHL_INT0_SL_PHY_ENABLE_MSK)
			     & (~CHL_INT0_NOT_RDY_MSK));
}

/*
 * Channel interrupt entry point: CHNL_INT_STATUS carries INT0/INT1/INT2
 * status per PHY; dispatch each pending sub-interrupt to its handler.
 */
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
		  & CHNL_INT_STS_MSK;

	while (irq_msk) {
		if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int0_v3_hw(hisi_hba, phy_no);

		if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int1_v3_hw(hisi_hba, phy_no);

		if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH)))
			handle_chl_int2_v3_hw(hisi_hba, phy_no);

		irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH));
		phy_no++;
	}

	return IRQ_HANDLED;
}

/*
 * Multi-bit (uncorrectable) ECC error descriptors: for each interrupt bit,
 * the register/mask/shift locating the failing memory address.
 */
static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
	{
		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
		.msk = HGC_DQE_ECC_MB_ADDR_MSK,
		.shift = HGC_DQE_ECC_MB_ADDR_OFF,
		.msg = "hgc_dqe_eccbad_intr",
		.reg = HGC_DQE_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
		.msk = HGC_IOST_ECC_MB_ADDR_MSK,
		.shift = HGC_IOST_ECC_MB_ADDR_OFF,
		.msg = "hgc_iost_eccbad_intr",
		.reg = HGC_IOST_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
		.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
		.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
		.msg = "hgc_itct_eccbad_intr",
		.reg = HGC_ITCT_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
		.msg = "hgc_iostl_eccbad_intr",
		.reg = HGC_LM_DFX_STATUS2,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
		.msg = "hgc_itctl_eccbad_intr",
		.reg = HGC_LM_DFX_STATUS2,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
		.msk = HGC_CQE_ECC_MB_ADDR_MSK,
		.shift = HGC_CQE_ECC_MB_ADDR_OFF,
		.msg = "hgc_cqe_eccbad_intr",
		.reg = HGC_CQE_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
		.msg = "rxm_mem0_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
		.msg = "rxm_mem1_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
		.msg = "rxm_mem2_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
		.msg = "rxm_mem3_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS15,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
		.msk = AM_ROB_ECC_ERR_ADDR_MSK,
		.shift = AM_ROB_ECC_ERR_ADDR_OFF,
		.msg = "ooo_ram_eccbad_intr",
		.reg = AM_ROB_ECC_ERR_ADDR,
	},
};

/*
 * For every multi-bit ECC error flagged in @irq_value, extract and log the
 * failing memory address and schedule the controller reset work — these
 * errors are uncorrectable.
 */
static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
					      u32 irq_value)
{
	struct device *dev = hisi_hba->dev;
	const struct hisi_sas_hw_error *ecc_error;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
		ecc_error = &multi_bit_ecc_errors[i];
		if (irq_value & ecc_error->irq_msk) {
			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
			val &= ecc_error->msk;
			val >>= ecc_error->shift;
			dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n",
				ecc_error->msg, irq_value, val);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}
	}
}

/*
 * ECC interrupt service: mask ECC interrupts, process any pending
 * multi-bit errors, ack them, and restore the previous mask.
 */
static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
{
	u32 irq_value, irq_msk;

	irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
	if (irq_value)
		multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value);

	hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
}

/* AXI error sub-codes of HGC_AXI_FIFO_ERR_INFO (sentinel-terminated). */
static const struct hisi_sas_hw_error axi_error[] = {
	{ .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
	{ .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
	{ .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
	{ .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
	{ .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
	{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
	{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
	{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
	{}
};

/* FIFO error sub-codes of HGC_AXI_FIFO_ERR_INFO (sentinel-terminated). */
static const struct hisi_sas_hw_error fifo_error[] = {
	{ .msk = BIT(8),  .msg = "CQE_WINFO_FIFO" },
	{ .msk = BIT(9),  .msg = "CQE_MSG_FIFIO" },
	{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
	{ .msk = BIT(11), .msg = "CMDP_FIFO" },
	{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
	{}
};

/* ENT_INT_SRC3 fatal error descriptors; .sub tables decode error info regs. */
static const struct hisi_sas_hw_error fatal_axi_error[] = {
	{
		.irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
		.msg = "write pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
		.msg = "iptt no match slot",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
		.msg = "read pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = axi_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = fifo_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
2025 .msg = "LM add/fetch list", 2026 }, 2027 { 2028 .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF), 2029 .msg = "SAS_HGC_ABT fetch LM list", 2030 }, 2031 { 2032 .irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF), 2033 .msg = "read dqe poison", 2034 }, 2035 { 2036 .irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF), 2037 .msg = "read iost poison", 2038 }, 2039 { 2040 .irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF), 2041 .msg = "read itct poison", 2042 }, 2043 { 2044 .irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF), 2045 .msg = "read itct ncq poison", 2046 }, 2047 2048 }; 2049 2050 static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) 2051 { 2052 u32 irq_value, irq_msk; 2053 struct hisi_hba *hisi_hba = p; 2054 struct device *dev = hisi_hba->dev; 2055 struct pci_dev *pdev = hisi_hba->pci_dev; 2056 int i; 2057 2058 irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); 2059 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00); 2060 2061 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 2062 irq_value &= ~irq_msk; 2063 2064 for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) { 2065 const struct hisi_sas_hw_error *error = &fatal_axi_error[i]; 2066 2067 if (!(irq_value & error->irq_msk)) 2068 continue; 2069 2070 if (error->sub) { 2071 const struct hisi_sas_hw_error *sub = error->sub; 2072 u32 err_value = hisi_sas_read32(hisi_hba, error->reg); 2073 2074 for (; sub->msk || sub->msg; sub++) { 2075 if (!(err_value & sub->msk)) 2076 continue; 2077 2078 dev_err(dev, "%s error (0x%x) found!\n", 2079 sub->msg, irq_value); 2080 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2081 } 2082 } else { 2083 dev_err(dev, "%s error (0x%x) found!\n", 2084 error->msg, irq_value); 2085 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2086 } 2087 2088 if (pdev->revision < 0x21) { 2089 u32 reg_val; 2090 2091 reg_val = hisi_sas_read32(hisi_hba, 2092 AXI_MASTER_CFG_BASE + 2093 AM_CTRL_GLOBAL); 2094 reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; 2095 hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + 2096 
AM_CTRL_GLOBAL, reg_val); 2097 } 2098 } 2099 2100 fatal_ecc_int_v3_hw(hisi_hba); 2101 2102 if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) { 2103 u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); 2104 u32 dev_id = reg_val & ITCT_DEV_MSK; 2105 struct hisi_sas_device *sas_dev = 2106 &hisi_hba->devices[dev_id]; 2107 2108 hisi_sas_write32(hisi_hba, ITCT_CLR, 0); 2109 dev_dbg(dev, "clear ITCT ok\n"); 2110 complete(sas_dev->completion); 2111 } 2112 2113 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00); 2114 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk); 2115 2116 return IRQ_HANDLED; 2117 } 2118 2119 static void 2120 slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, 2121 struct hisi_sas_slot *slot) 2122 { 2123 struct task_status_struct *ts = &task->task_status; 2124 struct hisi_sas_complete_v3_hdr *complete_queue = 2125 hisi_hba->complete_hdr[slot->cmplt_queue]; 2126 struct hisi_sas_complete_v3_hdr *complete_hdr = 2127 &complete_queue[slot->cmplt_queue_slot]; 2128 struct hisi_sas_err_record_v3 *record = 2129 hisi_sas_status_buf_addr_mem(slot); 2130 u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type); 2131 u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type); 2132 u32 dw3 = le32_to_cpu(complete_hdr->dw3); 2133 2134 switch (task->task_proto) { 2135 case SAS_PROTOCOL_SSP: 2136 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { 2137 ts->residual = trans_tx_fail_type; 2138 ts->stat = SAS_DATA_UNDERRUN; 2139 } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { 2140 ts->stat = SAS_QUEUE_FULL; 2141 slot->abort = 1; 2142 } else { 2143 ts->stat = SAS_OPEN_REJECT; 2144 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2145 } 2146 break; 2147 case SAS_PROTOCOL_SATA: 2148 case SAS_PROTOCOL_STP: 2149 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 2150 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { 2151 ts->residual = trans_tx_fail_type; 2152 ts->stat = SAS_DATA_UNDERRUN; 2153 } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { 2154 ts->stat = 
SAS_PHY_DOWN; 2155 slot->abort = 1; 2156 } else { 2157 ts->stat = SAS_OPEN_REJECT; 2158 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2159 } 2160 hisi_sas_sata_done(task, slot); 2161 break; 2162 case SAS_PROTOCOL_SMP: 2163 ts->stat = SAM_STAT_CHECK_CONDITION; 2164 break; 2165 default: 2166 break; 2167 } 2168 } 2169 2170 static void slot_complete_v3_hw(struct hisi_hba *hisi_hba, 2171 struct hisi_sas_slot *slot) 2172 { 2173 struct sas_task *task = slot->task; 2174 struct hisi_sas_device *sas_dev; 2175 struct device *dev = hisi_hba->dev; 2176 struct task_status_struct *ts; 2177 struct domain_device *device; 2178 struct sas_ha_struct *ha; 2179 struct hisi_sas_complete_v3_hdr *complete_queue = 2180 hisi_hba->complete_hdr[slot->cmplt_queue]; 2181 struct hisi_sas_complete_v3_hdr *complete_hdr = 2182 &complete_queue[slot->cmplt_queue_slot]; 2183 unsigned long flags; 2184 bool is_internal = slot->is_internal; 2185 u32 dw0, dw1, dw3; 2186 2187 if (unlikely(!task || !task->lldd_task || !task->dev)) 2188 return; 2189 2190 ts = &task->task_status; 2191 device = task->dev; 2192 ha = device->port->ha; 2193 sas_dev = device->lldd_dev; 2194 2195 spin_lock_irqsave(&task->task_state_lock, flags); 2196 task->task_state_flags &= 2197 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 2198 spin_unlock_irqrestore(&task->task_state_lock, flags); 2199 2200 memset(ts, 0, sizeof(*ts)); 2201 ts->resp = SAS_TASK_COMPLETE; 2202 2203 if (unlikely(!sas_dev)) { 2204 dev_dbg(dev, "slot complete: port has not device\n"); 2205 ts->stat = SAS_PHY_DOWN; 2206 goto out; 2207 } 2208 2209 dw0 = le32_to_cpu(complete_hdr->dw0); 2210 dw1 = le32_to_cpu(complete_hdr->dw1); 2211 dw3 = le32_to_cpu(complete_hdr->dw3); 2212 2213 /* 2214 * Use SAS+TMF status codes 2215 */ 2216 switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) { 2217 case STAT_IO_ABORTED: 2218 /* this IO has been aborted by abort command */ 2219 ts->stat = SAS_ABORTED_TASK; 2220 goto out; 2221 case STAT_IO_COMPLETE: 2222 /* internal 
abort command complete */ 2223 ts->stat = TMF_RESP_FUNC_SUCC; 2224 goto out; 2225 case STAT_IO_NO_DEVICE: 2226 ts->stat = TMF_RESP_FUNC_COMPLETE; 2227 goto out; 2228 case STAT_IO_NOT_VALID: 2229 /* 2230 * abort single IO, the controller can't find the IO 2231 */ 2232 ts->stat = TMF_RESP_FUNC_FAILED; 2233 goto out; 2234 default: 2235 break; 2236 } 2237 2238 /* check for erroneous completion */ 2239 if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) { 2240 u32 *error_info = hisi_sas_status_buf_addr_mem(slot); 2241 2242 slot_err_v3_hw(hisi_hba, task, slot); 2243 if (ts->stat != SAS_DATA_UNDERRUN) 2244 dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", 2245 slot->idx, task, sas_dev->device_id, 2246 dw0, dw1, complete_hdr->act, dw3, 2247 error_info[0], error_info[1], 2248 error_info[2], error_info[3]); 2249 if (unlikely(slot->abort)) { 2250 sas_task_abort(task); 2251 return; 2252 } 2253 goto out; 2254 } 2255 2256 switch (task->task_proto) { 2257 case SAS_PROTOCOL_SSP: { 2258 struct ssp_response_iu *iu = 2259 hisi_sas_status_buf_addr_mem(slot) + 2260 sizeof(struct hisi_sas_err_record); 2261 2262 sas_ssp_task_response(dev, task, iu); 2263 break; 2264 } 2265 case SAS_PROTOCOL_SMP: { 2266 struct scatterlist *sg_resp = &task->smp_task.smp_resp; 2267 void *to = page_address(sg_page(sg_resp)); 2268 2269 ts->stat = SAM_STAT_GOOD; 2270 2271 dma_unmap_sg(dev, &task->smp_task.smp_req, 1, 2272 DMA_TO_DEVICE); 2273 memcpy(to + sg_resp->offset, 2274 hisi_sas_status_buf_addr_mem(slot) + 2275 sizeof(struct hisi_sas_err_record), 2276 sg_resp->length); 2277 break; 2278 } 2279 case SAS_PROTOCOL_SATA: 2280 case SAS_PROTOCOL_STP: 2281 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 2282 ts->stat = SAM_STAT_GOOD; 2283 hisi_sas_sata_done(task, slot); 2284 break; 2285 default: 2286 ts->stat = SAM_STAT_CHECK_CONDITION; 2287 break; 2288 } 2289 2290 if (!slot->port->port_attached) { 2291 dev_warn(dev, "slot complete: port %d has 
removed\n", 2292 slot->port->sas_port.id); 2293 ts->stat = SAS_PHY_DOWN; 2294 } 2295 2296 out: 2297 spin_lock_irqsave(&task->task_state_lock, flags); 2298 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 2299 spin_unlock_irqrestore(&task->task_state_lock, flags); 2300 dev_info(dev, "slot complete: task(%pK) aborted\n", task); 2301 return; 2302 } 2303 task->task_state_flags |= SAS_TASK_STATE_DONE; 2304 spin_unlock_irqrestore(&task->task_state_lock, flags); 2305 hisi_sas_slot_task_free(hisi_hba, task, slot); 2306 2307 if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { 2308 spin_lock_irqsave(&device->done_lock, flags); 2309 if (test_bit(SAS_HA_FROZEN, &ha->state)) { 2310 spin_unlock_irqrestore(&device->done_lock, flags); 2311 dev_info(dev, "slot complete: task(%pK) ignored\n ", 2312 task); 2313 return; 2314 } 2315 spin_unlock_irqrestore(&device->done_lock, flags); 2316 } 2317 2318 if (task->task_done) 2319 task->task_done(task); 2320 } 2321 2322 static irqreturn_t cq_thread_v3_hw(int irq_no, void *p) 2323 { 2324 struct hisi_sas_cq *cq = p; 2325 struct hisi_hba *hisi_hba = cq->hisi_hba; 2326 struct hisi_sas_slot *slot; 2327 struct hisi_sas_complete_v3_hdr *complete_queue; 2328 u32 rd_point = cq->rd_point, wr_point; 2329 int queue = cq->id; 2330 2331 complete_queue = hisi_hba->complete_hdr[queue]; 2332 2333 wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + 2334 (0x14 * queue)); 2335 2336 while (rd_point != wr_point) { 2337 struct hisi_sas_complete_v3_hdr *complete_hdr; 2338 struct device *dev = hisi_hba->dev; 2339 u32 dw1; 2340 int iptt; 2341 2342 complete_hdr = &complete_queue[rd_point]; 2343 dw1 = le32_to_cpu(complete_hdr->dw1); 2344 2345 iptt = dw1 & CMPLT_HDR_IPTT_MSK; 2346 if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { 2347 slot = &hisi_hba->slot_info[iptt]; 2348 slot->cmplt_queue_slot = rd_point; 2349 slot->cmplt_queue = queue; 2350 slot_complete_v3_hw(hisi_hba, slot); 2351 } else 2352 dev_err(dev, "IPTT %d is invalid, discard it.\n", 
iptt); 2353 2354 if (++rd_point >= HISI_SAS_QUEUE_SLOTS) 2355 rd_point = 0; 2356 } 2357 2358 /* update rd_point */ 2359 cq->rd_point = rd_point; 2360 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); 2361 2362 return IRQ_HANDLED; 2363 } 2364 2365 static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) 2366 { 2367 struct hisi_sas_cq *cq = p; 2368 struct hisi_hba *hisi_hba = cq->hisi_hba; 2369 int queue = cq->id; 2370 2371 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); 2372 2373 return IRQ_WAKE_THREAD; 2374 } 2375 2376 static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) 2377 { 2378 int vectors; 2379 int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi; 2380 struct Scsi_Host *shost = hisi_hba->shost; 2381 struct irq_affinity desc = { 2382 .pre_vectors = BASE_VECTORS_V3_HW, 2383 }; 2384 2385 min_msi = MIN_AFFINE_VECTORS_V3_HW; 2386 vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev, 2387 min_msi, max_msi, 2388 PCI_IRQ_MSI | 2389 PCI_IRQ_AFFINITY, 2390 &desc); 2391 if (vectors < 0) 2392 return -ENOENT; 2393 2394 2395 hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW; 2396 shost->nr_hw_queues = hisi_hba->cq_nvecs; 2397 2398 return 0; 2399 } 2400 2401 static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) 2402 { 2403 struct device *dev = hisi_hba->dev; 2404 struct pci_dev *pdev = hisi_hba->pci_dev; 2405 int rc, i; 2406 2407 rc = devm_request_irq(dev, pci_irq_vector(pdev, 1), 2408 int_phy_up_down_bcast_v3_hw, 0, 2409 DRV_NAME " phy", hisi_hba); 2410 if (rc) { 2411 dev_err(dev, "could not request phy interrupt, rc=%d\n", rc); 2412 rc = -ENOENT; 2413 goto free_irq_vectors; 2414 } 2415 2416 rc = devm_request_irq(dev, pci_irq_vector(pdev, 2), 2417 int_chnl_int_v3_hw, 0, 2418 DRV_NAME " channel", hisi_hba); 2419 if (rc) { 2420 dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc); 2421 rc = -ENOENT; 2422 goto free_irq_vectors; 2423 } 2424 2425 rc = devm_request_irq(dev, pci_irq_vector(pdev, 11), 2426 fatal_axi_int_v3_hw, 
0, 2427 DRV_NAME " fatal", hisi_hba); 2428 if (rc) { 2429 dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc); 2430 rc = -ENOENT; 2431 goto free_irq_vectors; 2432 } 2433 2434 if (hisi_sas_intr_conv) 2435 dev_info(dev, "Enable interrupt converge\n"); 2436 2437 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 2438 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2439 int nr = hisi_sas_intr_conv ? 16 : 16 + i; 2440 unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 2441 IRQF_ONESHOT; 2442 2443 cq->irq_no = pci_irq_vector(pdev, nr); 2444 rc = devm_request_threaded_irq(dev, cq->irq_no, 2445 cq_interrupt_v3_hw, 2446 cq_thread_v3_hw, 2447 irqflags, 2448 DRV_NAME " cq", cq); 2449 if (rc) { 2450 dev_err(dev, "could not request cq%d interrupt, rc=%d\n", 2451 i, rc); 2452 rc = -ENOENT; 2453 goto free_irq_vectors; 2454 } 2455 cq->irq_mask = pci_irq_get_affinity(pdev, i + BASE_VECTORS_V3_HW); 2456 if (!cq->irq_mask) { 2457 dev_err(dev, "could not get cq%d irq affinity!\n", i); 2458 return -ENOENT; 2459 } 2460 } 2461 2462 return 0; 2463 2464 free_irq_vectors: 2465 pci_free_irq_vectors(pdev); 2466 return rc; 2467 } 2468 2469 static int hisi_sas_v3_init(struct hisi_hba *hisi_hba) 2470 { 2471 int rc; 2472 2473 rc = hw_init_v3_hw(hisi_hba); 2474 if (rc) 2475 return rc; 2476 2477 rc = interrupt_init_v3_hw(hisi_hba); 2478 if (rc) 2479 return rc; 2480 2481 return 0; 2482 } 2483 2484 static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no, 2485 struct sas_phy_linkrates *r) 2486 { 2487 enum sas_linkrate max = r->maximum_linkrate; 2488 u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, phy_no, 2489 PROG_PHY_LINK_RATE); 2490 2491 prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK; 2492 prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); 2493 hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, 2494 prog_phy_link_rate); 2495 } 2496 2497 static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba) 2498 { 2499 struct pci_dev *pdev = 
hisi_hba->pci_dev; 2500 int i; 2501 2502 synchronize_irq(pci_irq_vector(pdev, 1)); 2503 synchronize_irq(pci_irq_vector(pdev, 2)); 2504 synchronize_irq(pci_irq_vector(pdev, 11)); 2505 for (i = 0; i < hisi_hba->queue_count; i++) 2506 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); 2507 2508 for (i = 0; i < hisi_hba->cq_nvecs; i++) 2509 synchronize_irq(pci_irq_vector(pdev, i + 16)); 2510 2511 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); 2512 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); 2513 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); 2514 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); 2515 2516 for (i = 0; i < hisi_hba->n_phy; i++) { 2517 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); 2518 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff); 2519 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1); 2520 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1); 2521 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1); 2522 } 2523 } 2524 2525 static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba) 2526 { 2527 return hisi_sas_read32(hisi_hba, PHY_STATE); 2528 } 2529 2530 static int disable_host_v3_hw(struct hisi_hba *hisi_hba) 2531 { 2532 struct device *dev = hisi_hba->dev; 2533 u32 status, reg_val; 2534 int rc; 2535 2536 interrupt_disable_v3_hw(hisi_hba); 2537 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); 2538 2539 hisi_sas_stop_phys(hisi_hba); 2540 2541 mdelay(10); 2542 2543 reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + 2544 AM_CTRL_GLOBAL); 2545 reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; 2546 hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + 2547 AM_CTRL_GLOBAL, reg_val); 2548 2549 /* wait until bus idle */ 2550 rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE + 2551 AM_CURR_TRANS_RETURN, status, 2552 status == 0x3, 10, 100); 2553 if (rc) { 2554 dev_err(dev, "axi bus is not idle, rc=%d\n", rc); 2555 return rc; 2556 } 2557 2558 return 0; 2559 } 
/* Soft reset: quiesce the host, re-init driver memory, re-init the HW. */
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = disable_host_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	return hw_init_v3_hw(hisi_hba);
}

/*
 * SGPIO write: program TX registers for the requested register range.
 * Each 32-bit TX register covers 4 PHYs, hence the (n_phy + 3) / 4
 * bound check.  Only SAS_GPIO_REG_TX is supported.
 */
static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
			    u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct device *dev = hisi_hba->dev;
	u32 *data = (u32 *)write_data;
	int i;

	switch (reg_type) {
	case SAS_GPIO_REG_TX:
		if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) {
			dev_err(dev, "write gpio: invalid reg range[%d, %d]\n",
				reg_index, reg_index + reg_count - 1);
			return -EINVAL;
		}

		for (i = 0; i < reg_count; i++)
			hisi_sas_write32(hisi_hba,
					 SAS_GPIO_TX_0_1 + (reg_index + i) * 4,
					 data[i]);
		break;
	default:
		dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
			reg_type);
		return -EINVAL;
	}

	return 0;
}

/*
 * Poll CQE_SEND_CNT until it stops changing (commands have drained) or
 * timeout_ms elapses.  Best-effort: a timeout is only logged at debug
 * level, not returned.
 */
static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
					     int delay_ms, int timeout_ms)
{
	struct device *dev = hisi_hba->dev;
	int entries, entries_old = 0, time;

	for (time = 0; time < timeout_ms; time += delay_ms) {
		entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
		/* Counter stable across one delay interval => drained. */
		if (entries == entries_old)
			break;

		entries_old = entries;
		msleep(delay_ms);
	}

	if (time >= timeout_ms) {
		dev_dbg(dev, "Wait commands complete timeout!\n");
		return;
	}

	dev_dbg(dev, "wait commands complete %dms\n", time);
}

/* sysfs: report the module-wide interrupt-converge setting (read-only). */
static ssize_t intr_conv_v3_hw_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
}
static DEVICE_ATTR_RO(intr_conv_v3_hw);
2636 static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba) 2637 { 2638 /* config those registers between enable and disable PHYs */ 2639 hisi_sas_stop_phys(hisi_hba); 2640 2641 if (hisi_hba->intr_coal_ticks == 0 || 2642 hisi_hba->intr_coal_count == 0) { 2643 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); 2644 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); 2645 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); 2646 } else { 2647 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); 2648 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 2649 hisi_hba->intr_coal_ticks); 2650 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 2651 hisi_hba->intr_coal_count); 2652 } 2653 phys_init_v3_hw(hisi_hba); 2654 } 2655 2656 static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev, 2657 struct device_attribute *attr, 2658 char *buf) 2659 { 2660 struct Scsi_Host *shost = class_to_shost(dev); 2661 struct hisi_hba *hisi_hba = shost_priv(shost); 2662 2663 return scnprintf(buf, PAGE_SIZE, "%u\n", 2664 hisi_hba->intr_coal_ticks); 2665 } 2666 2667 static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev, 2668 struct device_attribute *attr, 2669 const char *buf, size_t count) 2670 { 2671 struct Scsi_Host *shost = class_to_shost(dev); 2672 struct hisi_hba *hisi_hba = shost_priv(shost); 2673 u32 intr_coal_ticks; 2674 int ret; 2675 2676 ret = kstrtou32(buf, 10, &intr_coal_ticks); 2677 if (ret) { 2678 dev_err(dev, "Input data of interrupt coalesce unmatch\n"); 2679 return -EINVAL; 2680 } 2681 2682 if (intr_coal_ticks >= BIT(24)) { 2683 dev_err(dev, "intr_coal_ticks must be less than 2^24!\n"); 2684 return -EINVAL; 2685 } 2686 2687 hisi_hba->intr_coal_ticks = intr_coal_ticks; 2688 2689 config_intr_coal_v3_hw(hisi_hba); 2690 2691 return count; 2692 } 2693 static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw); 2694 2695 static ssize_t intr_coal_count_v3_hw_show(struct device *dev, 2696 struct device_attribute 2697 *attr, char *buf) 2698 { 2699 struct Scsi_Host *shost = class_to_shost(dev); 2700 struct 
hisi_hba *hisi_hba = shost_priv(shost); 2701 2702 return scnprintf(buf, PAGE_SIZE, "%u\n", 2703 hisi_hba->intr_coal_count); 2704 } 2705 2706 static ssize_t intr_coal_count_v3_hw_store(struct device *dev, 2707 struct device_attribute 2708 *attr, const char *buf, size_t count) 2709 { 2710 struct Scsi_Host *shost = class_to_shost(dev); 2711 struct hisi_hba *hisi_hba = shost_priv(shost); 2712 u32 intr_coal_count; 2713 int ret; 2714 2715 ret = kstrtou32(buf, 10, &intr_coal_count); 2716 if (ret) { 2717 dev_err(dev, "Input data of interrupt coalesce unmatch\n"); 2718 return -EINVAL; 2719 } 2720 2721 if (intr_coal_count >= BIT(8)) { 2722 dev_err(dev, "intr_coal_count must be less than 2^8!\n"); 2723 return -EINVAL; 2724 } 2725 2726 hisi_hba->intr_coal_count = intr_coal_count; 2727 2728 config_intr_coal_v3_hw(hisi_hba); 2729 2730 return count; 2731 } 2732 static DEVICE_ATTR_RW(intr_coal_count_v3_hw); 2733 2734 static int slave_configure_v3_hw(struct scsi_device *sdev) 2735 { 2736 struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev); 2737 struct domain_device *ddev = sdev_to_domain_dev(sdev); 2738 struct hisi_hba *hisi_hba = shost_priv(shost); 2739 struct device *dev = hisi_hba->dev; 2740 int ret = sas_slave_configure(sdev); 2741 2742 if (ret) 2743 return ret; 2744 if (!dev_is_sata(ddev)) 2745 sas_change_queue_depth(sdev, 64); 2746 2747 if (sdev->type == TYPE_ENCLOSURE) 2748 return 0; 2749 2750 if (!device_link_add(&sdev->sdev_gendev, dev, 2751 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)) { 2752 if (pm_runtime_enabled(dev)) { 2753 dev_info(dev, "add device link failed, disable runtime PM for the host\n"); 2754 pm_runtime_disable(dev); 2755 } 2756 } 2757 2758 return 0; 2759 } 2760 2761 static struct device_attribute *host_attrs_v3_hw[] = { 2762 &dev_attr_phy_event_threshold, 2763 &dev_attr_intr_conv_v3_hw, 2764 &dev_attr_intr_coal_ticks_v3_hw, 2765 &dev_attr_intr_coal_count_v3_hw, 2766 NULL 2767 }; 2768 2769 static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] 
= { 2770 HISI_SAS_DEBUGFS_REG(PHY_CFG), 2771 HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE), 2772 HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE), 2773 HISI_SAS_DEBUGFS_REG(PHY_CTRL), 2774 HISI_SAS_DEBUGFS_REG(SL_CFG), 2775 HISI_SAS_DEBUGFS_REG(AIP_LIMIT), 2776 HISI_SAS_DEBUGFS_REG(SL_CONTROL), 2777 HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS), 2778 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0), 2779 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1), 2780 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2), 2781 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3), 2782 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4), 2783 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5), 2784 HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6), 2785 HISI_SAS_DEBUGFS_REG(TXID_AUTO), 2786 HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0), 2787 HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H), 2788 HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER), 2789 HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE), 2790 HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER), 2791 HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG), 2792 HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG), 2793 HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG), 2794 HISI_SAS_DEBUGFS_REG(CHL_INT0), 2795 HISI_SAS_DEBUGFS_REG(CHL_INT1), 2796 HISI_SAS_DEBUGFS_REG(CHL_INT2), 2797 HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK), 2798 HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK), 2799 HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK), 2800 HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME), 2801 HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN), 2802 HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER), 2803 HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK), 2804 HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK), 2805 HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK), 2806 HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK), 2807 HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK), 2808 HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK), 2809 HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS), 2810 HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS), 2811 HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME), 2812 HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST), 2813 HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB), 2814 HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW), 2815 
HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR), 2816 HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR), 2817 {} 2818 }; 2819 2820 static const struct hisi_sas_debugfs_reg debugfs_port_reg = { 2821 .lu = debugfs_port_reg_lu, 2822 .count = 0x100, 2823 .base_off = PORT_BASE, 2824 .read_port_reg = hisi_sas_phy_read32, 2825 }; 2826 2827 static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = { 2828 HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE), 2829 HISI_SAS_DEBUGFS_REG(PHY_CONTEXT), 2830 HISI_SAS_DEBUGFS_REG(PHY_STATE), 2831 HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA), 2832 HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE), 2833 HISI_SAS_DEBUGFS_REG(ITCT_CLR), 2834 HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO), 2835 HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI), 2836 HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO), 2837 HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI), 2838 HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG), 2839 HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL), 2840 HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL), 2841 HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME), 2842 HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE), 2843 HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME), 2844 HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME), 2845 HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME), 2846 HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME), 2847 HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME), 2848 HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN), 2849 HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME), 2850 HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2), 2851 HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT), 2852 HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE), 2853 HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS), 2854 HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS), 2855 HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO), 2856 HISI_SAS_DEBUGFS_REG(INT_COAL_EN), 2857 HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME), 2858 HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT), 2859 HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME), 2860 HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT), 2861 HISI_SAS_DEBUGFS_REG(OQ_INT_SRC), 
2862 HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK), 2863 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1), 2864 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2), 2865 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3), 2866 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1), 2867 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2), 2868 HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3), 2869 HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK), 2870 HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK), 2871 HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK), 2872 HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR), 2873 HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK), 2874 HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN), 2875 HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT), 2876 HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH), 2877 HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR), 2878 HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR), 2879 HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG), 2880 HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK), 2881 HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH), 2882 HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR), 2883 HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR), 2884 HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG), 2885 HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG), 2886 HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX), 2887 HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0), 2888 HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1), 2889 HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1), 2890 HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD), 2891 {} 2892 }; 2893 2894 static const struct hisi_sas_debugfs_reg debugfs_global_reg = { 2895 .lu = debugfs_global_reg_lu, 2896 .count = 0x800, 2897 .read_global_reg = hisi_sas_read32, 2898 }; 2899 2900 static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = { 2901 HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS), 2902 HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS), 2903 HISI_SAS_DEBUGFS_REG(AXI_CFG), 2904 HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR), 2905 {} 2906 }; 2907 2908 static const struct hisi_sas_debugfs_reg debugfs_axi_reg = { 2909 .lu = debugfs_axi_reg_lu, 2910 .count = 0x61, 2911 .base_off = AXI_MASTER_CFG_BASE, 2912 .read_global_reg = hisi_sas_read32, 2913 }; 2914 2915 static const struct 
hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = { 2916 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0), 2917 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1), 2918 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK), 2919 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK), 2920 HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK), 2921 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2), 2922 HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK), 2923 {} 2924 }; 2925 2926 static const struct hisi_sas_debugfs_reg debugfs_ras_reg = { 2927 .lu = debugfs_ras_reg_lu, 2928 .count = 0x10, 2929 .base_off = RAS_BASE, 2930 .read_global_reg = hisi_sas_read32, 2931 }; 2932 2933 static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba) 2934 { 2935 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 2936 2937 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); 2938 2939 wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000); 2940 2941 hisi_sas_sync_irqs(hisi_hba); 2942 } 2943 2944 static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) 2945 { 2946 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 2947 (u32)((1ULL << hisi_hba->queue_count) - 1)); 2948 2949 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 2950 } 2951 2952 static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba, 2953 enum hisi_sas_debugfs_cache_type type, 2954 u32 *cache) 2955 { 2956 u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 2957 HISI_SAS_IOST_ITCT_CACHE_NUM; 2958 struct device *dev = hisi_hba->dev; 2959 u32 *buf = cache; 2960 u32 i, val; 2961 2962 hisi_sas_write32(hisi_hba, TAB_RD_TYPE, type); 2963 2964 for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_DW_SZ; i++) { 2965 val = hisi_sas_read32(hisi_hba, TAB_DFX); 2966 if (val == 0xffffffff) 2967 break; 2968 } 2969 2970 if (val != 0xffffffff) { 2971 dev_err(dev, "Issue occurred in reading IOST/ITCT cache!\n"); 2972 return; 2973 } 2974 2975 memset(buf, 0, cache_dw_size * 4); 2976 buf[0] = val; 2977 2978 for (i = 1; i < cache_dw_size; i++) 2979 buf[i] = hisi_sas_read32(hisi_hba, TAB_DFX); 2980 } 2981 2982 static 
void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
{
	/*
	 * Prepare the selected phy for a BIST run: take the phy down,
	 * program the debugfs-supplied FFE (transmitter equalization)
	 * values, and disable the ALOS (loss-of-signal) check so the
	 * loopback can run without a real link partner.
	 */
	u32 reg_val;
	int phy_no = hisi_hba->debugfs_bist_phy_no;
	int i;

	/* disable PHY */
	hisi_sas_phy_enable(hisi_hba, phy_no, 0);

	/* update FFE: one 32-bit register per rate, starting at TXDEEMPH_G1 */
	for (i = 0; i < FFE_CFG_MAX; i++)
		hisi_sas_phy_write32(hisi_hba, phy_no, TXDEEMPH_G1 + (i * 0x4),
				     hisi_hba->debugfs_bist_ffe[phy_no][i]);

	/* disable ALOS */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
	reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
}

/*
 * Undo hisi_sas_bist_test_prep_v3_hw() once the BIST run is finished:
 * drop the loopback/BIST enables, re-enable the ALOS check, reset the
 * OOB link rate to its 1.5 Gbit default, and bring the phy back up.
 */
static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
{
	u32 reg_val;
	int phy_no = hisi_hba->debugfs_bist_phy_no;

	/* disable loopback */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL);
	reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
		     CFG_BIST_TEST_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val);

	/* enable ALOS */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
	reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);

	/* restore the linkrate */
	reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
	/* init OOB link rate as 1.5 Gbits */
	reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
	reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val);

	/* enable PHY */
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}

/* Seed values for the BIST pattern generator in non-fixed-code modes. */
#define SAS_PHY_BIST_CODE_INIT	0x1
#define SAS_PHY_BIST_CODE1_INIT	0X80
/*
 * debugfs "set_bist" hook: start (@enable) or stop a phy BIST loopback
 * run using the parameters previously written through debugfs
 * (phy, linkrate, code mode, digital/serdes path, FFE, fixed code words).
 * On stop, the accumulated error count is added to debugfs_bist_cnt.
 * Always returns 0.
 */
static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
{
	u32 reg_val, mode_tmp;
	u32 linkrate = hisi_hba->debugfs_bist_linkrate;
	u32 phy_no = hisi_hba->debugfs_bist_phy_no;
	u32 *ffe =
hisi_hba->debugfs_bist_ffe[phy_no];
	u32 code_mode = hisi_hba->debugfs_bist_code_mode;
	u32 path_mode = hisi_hba->debugfs_bist_mode;
	u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0];
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
		 phy_no, linkrate, code_mode, path_mode,
		 ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS],
		 ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS],
		 ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS],
		 ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE],
		 fix_code[FIXED_CODE_1]);
	/* loop-test mode field: 2 = serdes path, 1 = digital path */
	mode_tmp = path_mode ? 2 : 1;
	if (enable) {
		/* some preparations before bist test */
		hisi_sas_bist_test_prep_v3_hw(hisi_hba);

		/* set linkrate of bit test*/
		reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
					      PROG_PHY_LINK_RATE);
		reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
		reg_val |= (linkrate << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
		hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
				     reg_val);

		/* set code mode of bit test */
		reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
					      SAS_PHY_BIST_CTRL);
		reg_val &= ~(CFG_BIST_MODE_SEL_MSK | CFG_LOOP_TEST_MODE_MSK |
			     CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
			     CFG_BIST_TEST_MSK);
		reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
			    (mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
			    CFG_BIST_TEST_MSK);
		hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
				     reg_val);

		/* set the bist init value */
		if (code_mode == HISI_SAS_BIST_CODE_MODE_FIXED_DATA) {
			/* user-supplied fixed pattern words */
			reg_val = hisi_hba->debugfs_bist_fixed_code[0];
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE, reg_val);

			reg_val = hisi_hba->debugfs_bist_fixed_code[1];
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE1, reg_val);
		} else {
			/* default generator seeds for the other code modes */
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE,
					     SAS_PHY_BIST_CODE_INIT);
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     SAS_PHY_BIST_CODE1,
					     SAS_PHY_BIST_CODE1_INIT);
		}

		/* settle, then turn on the RX/TX BIST engines */
		mdelay(100);
		reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
		hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
				     reg_val);

		/* clear error bit: SAS_BIST_ERR_CNT is clear-on-read */
		mdelay(100);
		hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT);
	} else {
		/* disable bist test and recover it */
		hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
				phy_no, SAS_BIST_ERR_CNT);
		hisi_sas_bist_test_restore_v3_hw(hisi_hba);
	}

	return 0;
}

/*
 * blk-mq map_queues hook: spread the hw queues over the PCI MSI vectors,
 * skipping the first BASE_VECTORS_V3_HW vectors which are reserved for
 * non-CQ (event/fatal) interrupts.
 */
static int hisi_sas_map_queues(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

	return blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
				     BASE_VECTORS_V3_HW);
}

/* SCSI host template for v3 HW; mostly libsas defaults plus local hooks. */
static struct scsi_host_template sht_v3_hw = {
	.name			= DRV_NAME,
	.proc_name		= DRV_NAME,
	.module			= THIS_MODULE,
	.queuecommand		= sas_queuecommand,
	.dma_need_drain		= ata_scsi_dma_need_drain,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= slave_configure_v3_hw,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.map_queues		= hisi_sas_map_queues,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.this_id		= -1,
	.sg_tablesize		= HISI_SAS_SGE_PAGE_CNT,
	.sg_prot_tablesize	= HISI_SAS_SGE_PAGE_CNT,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= sas_ioctl,
#endif
	.shost_attrs		= host_attrs_v3_hw,
.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
	.host_reset		= hisi_sas_host_reset,
	.host_tagset		= 1,
};

/*
 * v3 HW ops table consumed by the common hisi_sas core (main.c):
 * task preparation, phy control, reset, and debugfs snapshot hooks.
 */
static const struct hisi_sas_hw hisi_sas_v3_hw = {
	.hw_init = hisi_sas_v3_init,
	.setup_itct = setup_itct_v3_hw,
	.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
	.clear_itct = clear_itct_v3_hw,
	.sl_notify_ssp = sl_notify_ssp_v3_hw,
	.prep_ssp = prep_ssp_v3_hw,
	.prep_smp = prep_smp_v3_hw,
	.prep_stp = prep_ata_v3_hw,
	.prep_abort = prep_abort_v3_hw,
	.start_delivery = start_delivery_v3_hw,
	.phys_init = phys_init_v3_hw,
	.phy_start = start_phy_v3_hw,
	.phy_disable = disable_phy_v3_hw,
	.phy_hard_reset = phy_hard_reset_v3_hw,
	.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
	.phy_set_linkrate = phy_set_linkrate_v3_hw,
	.dereg_device = dereg_device_v3_hw,
	.soft_reset = soft_reset_v3_hw,
	.get_phys_state = get_phys_state_v3_hw,
	.get_events = phy_get_events_v3_hw,
	.write_gpio = write_gpio_v3_hw,
	.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
	.debugfs_reg_array[DEBUGFS_GLOBAL] = &debugfs_global_reg,
	.debugfs_reg_array[DEBUGFS_AXI] = &debugfs_axi_reg,
	.debugfs_reg_array[DEBUGFS_RAS] = &debugfs_ras_reg,
	.debugfs_reg_port = &debugfs_port_reg,
	.snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
	.snapshot_restore = debugfs_snapshot_restore_v3_hw,
	.read_iost_itct_cache = read_iost_itct_cache_v3_hw,
	.set_bist = debugfs_set_bist_v3_hw,
};

/*
 * Allocate and pre-initialize the Scsi_Host (and embedded hisi_hba) for
 * one PCI function: wire up work items, ops table, and driver state,
 * read FW-provided properties, and allocate core resources.
 * Returns the host on success, NULL on any failure (host already put).
 */
static struct Scsi_Host *
hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "shost alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler);
	hisi_hba->hw = &hisi_sas_v3_hw;
	hisi_hba->pci_dev = pdev;
	hisi_hba->dev = dev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	/* Invalid module-param prot_mask: warn and keep the 0 default. */
	if (prot_mask & ~HISI_SAS_PROT_MASK)
		dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n",
			prot_mask);
	else
		hisi_hba->prot_mask = prot_mask;

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

/*
 * PCI probe: enable the device, map BAR 5, register the SCSI host and
 * libsas HA, initialize the HW, and kick off device scanning.
 */
static int
hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable_device;

	/* Prefer 64-bit DMA; fall back to 32-bit before giving up. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "No usable DMA addressing method\n");
		rc = -ENODEV;
		goto err_out_regions;
	}

	shost = hisi_sas_shost_alloc_pci(pdev);
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	dev_set_drvdata(dev, sha);

	/* BAR 5 holds the register space (managed mapping, auto-unmapped). */
	hisi_hba->regs = pcim_iomap(pdev, 5, 0);
	if (!hisi_hba->regs) {
		dev_err(dev, "cannot map register\n");
		rc = -ENOMEM;
		goto
err_out_ha;
	}

	phy_nr = port_nr = hisi_hba->n_phy;

	/* Per-phy/per-port pointer tables handed to libsas below. */
	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
	shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	if (hisi_hba->prot_mask) {
		dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
			 prot_mask);
		scsi_host_set_prot(hisi_hba->shost, prot_mask);
		if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
			scsi_host_set_guard(hisi_hba->shost,
					    SHOST_DIX_GUARD_CRC);
	}

	if (hisi_sas_debugfs_enable)
		hisi_sas_debugfs_init(hisi_hba);

	rc = interrupt_preinit_v3_hw(hisi_hba);
	if (rc)
		goto err_out_ha;
	/* NOTE(review): informational message logged at err level. */
	dev_err(dev, "%d hw queues\n", shost->nr_hw_queues);
	rc = scsi_add_host(shost, dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	/*
	 * For the situation that there are ATA disks connected with SAS
	 * controller, it additionally creates ata_port which will affect the
	 * child_count of hisi_hba->dev. Even if suspended all the disks,
	 * ata_port is still and the child_count of hisi_hba->dev is not 0.
	 * So use pm_suspend_ignore_children() to ignore the effect to
	 * hisi_hba->dev.
	 */
	pm_suspend_ignore_children(dev, true);
	pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_debugfs_exit(hisi_hba);
	scsi_host_put(shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out:
	return rc;
}

/*
 * Free all IRQs requested by interrupt init and release the MSI vectors.
 * Vectors 1, 2 and 11 are presumably the phy, channel and fatal/AXI
 * event interrupts — confirm against interrupt_init_v3_hw (not in view).
 */
static void
hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
	int i;

	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		/* interrupt converge mode: every CQ shares vector 16 */
		int nr = hisi_sas_intr_conv ?
16 : 16 + i;

		free_irq(pci_irq_vector(pdev, nr), cq);
	}
	pci_free_irq_vectors(pdev);
}

/*
 * PCI remove: unwind probe in reverse — unregister from libsas/SCSI,
 * tear down IRQs, release PCI resources, then free core state and
 * drop the final host reference.
 */
static void hisi_sas_v3_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sas_ha_struct *sha = dev_get_drvdata(dev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	/* balance the pm_runtime_put_noidle() done in probe */
	pm_runtime_get_noresume(dev);
	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	hisi_sas_free(hisi_hba);
	hisi_sas_debugfs_exit(hisi_hba);
	scsi_host_put(shost);
}

/*
 * PCI FLR (function-level reset) prepare hook: mark a reset in
 * progress and quiesce/disable the controller before the reset fires.
 */
static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	int rc;

	dev_info(dev, "FLR prepare\n");
	set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = disable_host_v3_hw(hisi_hba);
	if (rc)
		dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
}

/*
 * PCI FLR done hook: re-init queue memory and the HW, then complete
 * the controller-reset sequence started in the prepare hook.
 */
static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	int rc;

	hisi_sas_init_mem(hisi_hba);

	rc = hw_init_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
		return;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "FLR done\n");
}

enum {
	/* instances of the controller */
	hip08,
};

/*
 * Core suspend path (shared by system sleep and runtime PM wrappers):
 * block new requests, drain work, disable the HW, then power the PCI
 * function down and suspend the libsas HA. Returns 0 or a negative
 * error (-1 if a reset is already in flight).
 */
static int _suspend_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	pci_power_t device_state;
	int rc;

	if (!pdev->pm_cap) {
		dev_err(dev, "PCI PM not supported\n");
		return -ENODEV;
	}

	/* refuse to suspend while another reset owns the controller */
	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	flush_workqueue(hisi_hba->wq);

	rc = disable_host_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	device_state = pci_choose_state(pdev, PMSG_SUSPEND);
	dev_warn(dev, "entering operating state [D%d]\n",
		 device_state);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, device_state);

	hisi_sas_release_tasks(hisi_hba);

	sas_suspend_ha(sha);
	return 0;
}

/*
 * Core resume path: power the PCI function back to D0, restore config
 * space, re-init the HW and phys, and resume the libsas HA.
 */
static int _resume_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = hisi_hba->shost;
	struct device *dev = hisi_hba->dev;
	/* NOTE(review): rc is unsigned but pci_enable_device() returns a
	 * negative errno; the nonzero check still works, yet int would be
	 * the clearer type — confirm before changing.
	 */
	unsigned int rc;
	pci_power_t device_state = pdev->current_state;

	dev_warn(dev, "resuming from operating state [D%d]\n",
		 device_state);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(dev, "enable device failed during resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	scsi_unblock_requests(shost);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	sas_prep_resume_ha(sha);
	rc = hw_init_v3_hw(hisi_hba);
	if (rc) {
		scsi_remove_host(shost);
		pci_disable_device(pdev);
		return rc;
	}
	hisi_hba->hw->phys_init(hisi_hba);
	sas_resume_ha(sha);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return 0;
}

/*
 * System-sleep/runtime suspend wrapper: flag PM in progress around the
 * core suspend, clearing the flag again on failure.
 */
static int suspend_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	int rc;

	set_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);

	rc = _suspend_v3_hw(device);
	if (rc)
		clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);

	return rc;
}

/* Resume wrapper: run the core resume then drop the PM-in-progress flag. */
static int resume_v3_hw(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	int rc = _resume_v3_hw(device);

	clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);

	return rc;
}

/* Supported devices: HiSilicon hip08 SAS controller (0xa230). */
static const struct pci_device_id sas_v3_pci_table[] = {
	{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
	{}
};
MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);

/* FLR hooks invoked by the PCI core around a function-level reset. */
static const struct pci_error_handlers hisi_sas_err_handler = {
	.reset_prepare	= hisi_sas_reset_prepare_v3_hw,
	.reset_done	= hisi_sas_reset_done_v3_hw,
};

/* Runtime PM reuses the system-sleep suspend/resume paths. */
static int runtime_suspend_v3_hw(struct device *dev)
{
	return suspend_v3_hw(dev);
}

static int runtime_resume_v3_hw(struct device *dev)
{
	return resume_v3_hw(dev);
}

static const struct dev_pm_ops hisi_sas_v3_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(suspend_v3_hw, resume_v3_hw)
	SET_RUNTIME_PM_OPS(runtime_suspend_v3_hw,
			   runtime_resume_v3_hw, NULL)
};

static struct pci_driver sas_v3_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sas_v3_pci_table,
	.probe		= hisi_sas_v3_probe,
	.remove		= hisi_sas_v3_remove,
	.err_handler	= &hisi_sas_err_handler,
	.driver.pm	= &hisi_sas_v3_pm_ops,
};

module_pci_driver(sas_v3_pci_driver);
/* 0444: intr_conv (interrupt converge mode) is read-only via sysfs */
module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
MODULE_ALIAS("pci:" DRV_NAME);