// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2017 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas_v3_hw"

/* global registers need init */
#define DLVRY_QUEUE_ENABLE		0x0
#define IOST_BASE_ADDR_LO		0x8
#define IOST_BASE_ADDR_HI		0xc
#define ITCT_BASE_ADDR_LO		0x10
#define ITCT_BASE_ADDR_HI		0x14
#define IO_BROKEN_MSG_ADDR_LO		0x18
#define IO_BROKEN_MSG_ADDR_HI		0x1c
#define PHY_CONTEXT			0x20
#define PHY_STATE			0x24
#define PHY_PORT_NUM_MA			0x28
#define PHY_CONN_RATE			0x30
/* ITCT (device context) clear control */
#define ITCT_CLR			0x44
#define ITCT_CLR_EN_OFF			16
#define ITCT_CLR_EN_MSK			(0x1 << ITCT_CLR_EN_OFF)
#define ITCT_DEV_OFF			0
#define ITCT_DEV_MSK			(0x7ff << ITCT_DEV_OFF)
#define SAS_AXI_USER3			0x50
#define IO_SATA_BROKEN_MSG_ADDR_LO	0x58
#define IO_SATA_BROKEN_MSG_ADDR_HI	0x5c
#define SATA_INITI_D2H_STORE_ADDR_LO	0x60
#define SATA_INITI_D2H_STORE_ADDR_HI	0x64
#define CFG_MAX_TAG			0x68
#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL	0x84
#define HGC_SAS_TXFAIL_RETRY_CTRL	0x88
#define HGC_GET_ITV_TIME		0x90
#define DEVICE_MSG_WORK_MODE		0x94
#define OPENA_WT_CONTI_TIME		0x9c
#define I_T_NEXUS_LOSS_TIME		0xa0
#define MAX_CON_TIME_LIMIT_TIME		0xa4
#define BUS_INACTIVE_LIMIT_TIME		0xa8
#define REJECT_TO_OPEN_LIMIT_TIME	0xac
#define CQ_INT_CONVERGE_EN		0xb0
#define CFG_AGING_TIME			0xbc
#define HGC_DFX_CFG2			0xc0
/* per-IPTT abort set/query interface */
#define CFG_ABT_SET_QUERY_IPTT		0xd4
#define CFG_SET_ABORTED_IPTT_OFF	0
#define CFG_SET_ABORTED_IPTT_MSK	(0xfff << CFG_SET_ABORTED_IPTT_OFF)
#define CFG_SET_ABORTED_EN_OFF		12
#define CFG_ABT_SET_IPTT_DONE		0xd8
#define CFG_ABT_SET_IPTT_DONE_OFF	0
#define HGC_IOMB_PROC1_STATUS		0x104
#define HGC_LM_DFX_STATUS2		0x128
#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF		0
#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK	(0xfff << \
					 HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF		12
#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK	(0x7ff << \
					 HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
/* ECC error address capture registers (1-bit / multi-bit) */
#define HGC_CQE_ECC_ADDR		0x13c
#define HGC_CQE_ECC_1B_ADDR_OFF		0
#define HGC_CQE_ECC_1B_ADDR_MSK		(0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
#define HGC_CQE_ECC_MB_ADDR_OFF		8
#define HGC_CQE_ECC_MB_ADDR_MSK		(0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
#define HGC_IOST_ECC_ADDR		0x140
#define HGC_IOST_ECC_1B_ADDR_OFF	0
#define HGC_IOST_ECC_1B_ADDR_MSK	(0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
#define HGC_IOST_ECC_MB_ADDR_OFF	16
#define HGC_IOST_ECC_MB_ADDR_MSK	(0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
#define HGC_DQE_ECC_ADDR		0x144
#define HGC_DQE_ECC_1B_ADDR_OFF		0
#define HGC_DQE_ECC_1B_ADDR_MSK		(0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
#define HGC_DQE_ECC_MB_ADDR_OFF		16
#define HGC_DQE_ECC_MB_ADDR_MSK		(0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define CHNL_INT_STATUS			0x148
#define HGC_ITCT_ECC_ADDR		0x150
#define HGC_ITCT_ECC_1B_ADDR_OFF	0
#define HGC_ITCT_ECC_1B_ADDR_MSK	(0x3ff << \
					 HGC_ITCT_ECC_1B_ADDR_OFF)
#define HGC_ITCT_ECC_MB_ADDR_OFF	16
#define HGC_ITCT_ECC_MB_ADDR_MSK	(0x3ff << \
					 HGC_ITCT_ECC_MB_ADDR_OFF)
#define HGC_AXI_FIFO_ERR_INFO		0x154
#define AXI_ERR_INFO_OFF		0
#define AXI_ERR_INFO_MSK		(0xff << AXI_ERR_INFO_OFF)
#define FIFO_ERR_INFO_OFF		8
#define FIFO_ERR_INFO_MSK		(0xff << FIFO_ERR_INFO_OFF)
/* interrupt coalescing control */
#define INT_COAL_EN			0x19c
#define OQ_INT_COAL_TIME		0x1a0
#define OQ_INT_COAL_CNT			0x1a4
#define ENT_INT_COAL_TIME		0x1a8
#define ENT_INT_COAL_CNT		0x1ac
/* entry interrupt sources and masks */
#define OQ_INT_SRC			0x1b0
#define OQ_INT_SRC_MSK			0x1b4
#define ENT_INT_SRC1			0x1b8
#define ENT_INT_SRC1_D2H_FIS_CH0_OFF	0
#define ENT_INT_SRC1_D2H_FIS_CH0_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
#define ENT_INT_SRC1_D2H_FIS_CH1_OFF	8
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2			0x1bc
#define ENT_INT_SRC3			0x1c0
#define ENT_INT_SRC3_WP_DEPTH_OFF	8
#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF	9
#define ENT_INT_SRC3_RP_DEPTH_OFF	10
#define ENT_INT_SRC3_AXI_OFF		11
#define ENT_INT_SRC3_FIFO_OFF		12
#define ENT_INT_SRC3_LM_OFF		14
#define ENT_INT_SRC3_ITC_INT_OFF	15
#define ENT_INT_SRC3_ITC_INT_MSK	(0x1 << ENT_INT_SRC3_ITC_INT_OFF)
#define ENT_INT_SRC3_ABT_OFF		16
#define ENT_INT_SRC3_DQE_POISON_OFF	18
#define ENT_INT_SRC3_IOST_POISON_OFF	19
#define ENT_INT_SRC3_ITCT_POISON_OFF	20
#define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF	21
#define ENT_INT_SRC_MSK1		0x1c4
#define ENT_INT_SRC_MSK2		0x1c8
#define ENT_INT_SRC_MSK3		0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF	31
#define CHNL_PHYUPDOWN_INT_MSK		0x1d0
#define CHNL_ENT_INT_MSK		0x1d4
#define HGC_COM_INT_MSK			0x1d8
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK	(0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
/* SAS ECC interrupt: one 1-bit and one multi-bit flag per RAM */
#define SAS_ECC_INTR			0x1e8
#define SAS_ECC_INTR_DQE_ECC_1B_OFF	0
#define SAS_ECC_INTR_DQE_ECC_MB_OFF	1
#define SAS_ECC_INTR_IOST_ECC_1B_OFF	2
#define SAS_ECC_INTR_IOST_ECC_MB_OFF	3
#define SAS_ECC_INTR_ITCT_ECC_1B_OFF	4
#define SAS_ECC_INTR_ITCT_ECC_MB_OFF	5
#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF	6
#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF	7
#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF	8
#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF	9
#define SAS_ECC_INTR_CQE_ECC_1B_OFF	10
#define SAS_ECC_INTR_CQE_ECC_MB_OFF	11
#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF	12
#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF	13
#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF	14
#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF	15
#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF	16
#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF	17
#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF	18
#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF	19
#define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF	20
#define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF	21
#define SAS_ECC_INTR_MSK		0x1ec
#define HGC_ERR_STAT_EN			0x238
#define CQE_SEND_CNT			0x248
/* queue 0 registers; queue i is at base + i * 0x14 */
#define DLVRY_Q_0_BASE_ADDR_LO		0x260
#define DLVRY_Q_0_BASE_ADDR_HI		0x264
#define DLVRY_Q_0_DEPTH			0x268
#define DLVRY_Q_0_WR_PTR		0x26c
#define DLVRY_Q_0_RD_PTR		0x270
#define HYPER_STREAM_ID_EN_CFG		0xc80
#define OQ0_INT_SRC_MSK			0xc90
#define COMPL_Q_0_BASE_ADDR_LO		0x4e0
#define COMPL_Q_0_BASE_ADDR_HI		0x4e4
#define COMPL_Q_0_DEPTH			0x4e8
#define COMPL_Q_0_WR_PTR		0x4ec
#define COMPL_Q_0_RD_PTR		0x4f0
#define HGC_RXM_DFX_STATUS14		0xae8
#define HGC_RXM_DFX_STATUS14_MEM0_OFF	0
#define HGC_RXM_DFX_STATUS14_MEM0_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS14_MEM0_OFF)
#define HGC_RXM_DFX_STATUS14_MEM1_OFF	9
#define HGC_RXM_DFX_STATUS14_MEM1_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS14_MEM1_OFF)
#define HGC_RXM_DFX_STATUS14_MEM2_OFF	18
#define HGC_RXM_DFX_STATUS14_MEM2_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS14_MEM2_OFF)
#define HGC_RXM_DFX_STATUS15		0xaec
#define HGC_RXM_DFX_STATUS15_MEM3_OFF	0
#define HGC_RXM_DFX_STATUS15_MEM3_MSK	(0x1ff << \
					 HGC_RXM_DFX_STATUS15_MEM3_OFF)
#define AWQOS_AWCACHE_CFG		0xc84
#define ARQOS_ARCACHE_CFG		0xc88
#define HILINK_ERR_DFX			0xe04
/* GPIO / LED control */
#define SAS_GPIO_CFG_0			0x1000
#define SAS_GPIO_CFG_1			0x1004
#define SAS_GPIO_TX_0_1			0x1040
#define SAS_CFG_DRIVE_VLD		0x1070

/* phy registers requiring init */
#define PORT_BASE			(0x2000)
#define PHY_CFG				(PORT_BASE + 0x0)
#define HARD_PHY_LINKRATE		(PORT_BASE + 0x4)
#define PHY_CFG_ENA_OFF			0
#define PHY_CFG_ENA_MSK			(0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF		2
#define PHY_CFG_DC_OPT_MSK		(0x1 << PHY_CFG_DC_OPT_OFF)
#define PHY_CFG_PHY_RST_OFF		3
#define PHY_CFG_PHY_RST_MSK		(0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE		(PORT_BASE + 0x8)
#define PHY_CTRL			(PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF		0
#define PHY_CTRL_RESET_MSK		(0x1 << PHY_CTRL_RESET_OFF)
#define CMD_HDR_PIR_OFF			8
#define CMD_HDR_PIR_MSK			(0x1 << CMD_HDR_PIR_OFF)
#define SERDES_CFG			(PORT_BASE + 0x1c)
#define SL_CFG				(PORT_BASE + 0x84)
#define AIP_LIMIT			(PORT_BASE + 0x90)
#define SL_CONTROL			(PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF	0
#define SL_CONTROL_NOTIFY_EN_MSK	(0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CTA_OFF			17
#define SL_CTA_MSK			(0x1 << SL_CTA_OFF)
#define RX_PRIMS_STATUS			(PORT_BASE + 0x98)
#define RX_BCAST_CHG_OFF		1
#define RX_BCAST_CHG_MSK		(0x1 << RX_BCAST_CHG_OFF)
/* identify-frame transmit dwords */
#define TX_ID_DWORD0			(PORT_BASE + 0x9c)
#define TX_ID_DWORD1			(PORT_BASE + 0xa0)
#define TX_ID_DWORD2			(PORT_BASE + 0xa4)
#define TX_ID_DWORD3			(PORT_BASE + 0xa8)
#define TX_ID_DWORD4			(PORT_BASE + 0xaC)
#define TX_ID_DWORD5			(PORT_BASE + 0xb0)
#define TX_ID_DWORD6			(PORT_BASE + 0xb4)
#define TXID_AUTO			(PORT_BASE + 0xb8)
#define CT3_OFF				1
#define CT3_MSK				(0x1 << CT3_OFF)
#define TX_HARDRST_OFF			2
#define TX_HARDRST_MSK			(0x1 << TX_HARDRST_OFF)
#define RX_IDAF_DWORD0			(PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H		(PORT_BASE + 0xfc)
#define STP_LINK_TIMER			(PORT_BASE + 0x120)
#define STP_LINK_TIMEOUT_STATE		(PORT_BASE + 0x124)
#define CON_CFG_DRIVER			(PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG		(PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG		(PORT_BASE + 0x138)
#define SAS_STP_CON_TIMER_CFG		(PORT_BASE + 0x13c)
/* per-channel interrupt status/mask */
#define CHL_INT0			(PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF	0
#define CHL_INT0_HOTPLUG_TOUT_MSK	(0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
#define CHL_INT0_SL_RX_BCST_ACK_OFF	1
#define CHL_INT0_SL_RX_BCST_ACK_MSK	(0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
#define CHL_INT0_SL_PHY_ENABLE_OFF	2
#define CHL_INT0_SL_PHY_ENABLE_MSK	(0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
#define CHL_INT0_NOT_RDY_OFF		4
#define CHL_INT0_NOT_RDY_MSK		(0x1 << CHL_INT0_NOT_RDY_OFF)
#define CHL_INT0_PHY_RDY_OFF		5
#define CHL_INT0_PHY_RDY_MSK		(0x1 << CHL_INT0_PHY_RDY_OFF)
#define CHL_INT1			(PORT_BASE + 0x1b8)
#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF	15
#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF	16
#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF 17 243 #define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF 18 244 #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19 245 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20 246 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21 247 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 248 #define CHL_INT1_DMAC_TX_FIFO_ERR_OFF 23 249 #define CHL_INT1_DMAC_RX_FIFO_ERR_OFF 24 250 #define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF 26 251 #define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF 27 252 #define CHL_INT2 (PORT_BASE + 0x1bc) 253 #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 254 #define CHL_INT2_RX_DISP_ERR_OFF 28 255 #define CHL_INT2_RX_CODE_ERR_OFF 29 256 #define CHL_INT2_RX_INVLD_DW_OFF 30 257 #define CHL_INT2_STP_LINK_TIMEOUT_OFF 31 258 #define CHL_INT0_MSK (PORT_BASE + 0x1c0) 259 #define CHL_INT1_MSK (PORT_BASE + 0x1c4) 260 #define CHL_INT2_MSK (PORT_BASE + 0x1c8) 261 #define SAS_EC_INT_COAL_TIME (PORT_BASE + 0x1cc) 262 #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) 263 #define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4) 264 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) 265 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) 266 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) 267 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) 268 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) 269 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) 270 #define DMA_TX_STATUS (PORT_BASE + 0x2d0) 271 #define DMA_TX_STATUS_BUSY_OFF 0 272 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) 273 #define DMA_RX_STATUS (PORT_BASE + 0x2e8) 274 #define DMA_RX_STATUS_BUSY_OFF 0 275 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) 276 277 #define COARSETUNE_TIME (PORT_BASE + 0x304) 278 #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380) 279 #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384) 280 #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390) 281 #define ERR_CNT_CODE_ERR (PORT_BASE + 0x394) 282 #define ERR_CNT_DISP_ERR (PORT_BASE + 0x398) 283 284 #define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */ 285 
/* the hw ITCT table is fixed-size; the driver's device table must fit */
#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
#error Max ITCT exceeded
#endif

/* AXI master configuration space */
#define AXI_MASTER_CFG_BASE		(0x5000)
#define AM_CTRL_GLOBAL			(0x0)
#define AM_CTRL_SHUTDOWN_REQ_OFF	0
#define AM_CTRL_SHUTDOWN_REQ_MSK	(0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
#define AM_CURR_TRANS_RETURN		(0x150)

#define AM_CFG_MAX_TRANS		(0x5010)
#define AM_CFG_SINGLE_PORT_MAX_TRANS	(0x5014)
#define AXI_CFG				(0x5100)
#define AM_ROB_ECC_ERR_ADDR		(0x510c)
#define AM_ROB_ECC_ERR_ADDR_OFF		0
#define AM_ROB_ECC_ERR_ADDR_MSK		0xffffffff

/* RAS registers need init */
#define RAS_BASE			(0x6000)
#define SAS_RAS_INTR0			(RAS_BASE)
#define SAS_RAS_INTR1			(RAS_BASE + 0x04)
#define SAS_RAS_INTR0_MASK		(RAS_BASE + 0x08)
#define SAS_RAS_INTR1_MASK		(RAS_BASE + 0x0c)
#define CFG_SAS_RAS_INTR_MASK		(RAS_BASE + 0x1c)
#define SAS_RAS_INTR2			(RAS_BASE + 0x20)
#define SAS_RAS_INTR2_MASK		(RAS_BASE + 0x24)

/* HW dma structures */
/* Delivery queue header */
/* dw0 */
#define CMD_HDR_ABORT_FLAG_OFF		0
#define CMD_HDR_ABORT_FLAG_MSK		(0x3 << CMD_HDR_ABORT_FLAG_OFF)
#define CMD_HDR_ABORT_DEVICE_TYPE_OFF	2
#define CMD_HDR_ABORT_DEVICE_TYPE_MSK	(0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
#define CMD_HDR_RESP_REPORT_OFF		5
#define CMD_HDR_RESP_REPORT_MSK		(0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF		6
#define CMD_HDR_TLR_CTRL_MSK		(0x3 << CMD_HDR_TLR_CTRL_OFF)
#define CMD_HDR_PORT_OFF		18
#define CMD_HDR_PORT_MSK		(0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF		27
#define CMD_HDR_PRIORITY_MSK		(0x1 << CMD_HDR_PRIORITY_OFF)
#define CMD_HDR_CMD_OFF			29
#define CMD_HDR_CMD_MSK			(0x7 << CMD_HDR_CMD_OFF)
/* dw1 */
#define CMD_HDR_UNCON_CMD_OFF		3
#define CMD_HDR_DIR_OFF			5
#define CMD_HDR_DIR_MSK			(0x3 << CMD_HDR_DIR_OFF)
#define CMD_HDR_RESET_OFF		7
#define CMD_HDR_RESET_MSK		(0x1 << CMD_HDR_RESET_OFF)
#define CMD_HDR_VDTL_OFF		10
#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) 337 #define CMD_HDR_FRAME_TYPE_OFF 11 338 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) 339 #define CMD_HDR_DEV_ID_OFF 16 340 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) 341 /* dw2 */ 342 #define CMD_HDR_CFL_OFF 0 343 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) 344 #define CMD_HDR_NCQ_TAG_OFF 10 345 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) 346 #define CMD_HDR_MRFL_OFF 15 347 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) 348 #define CMD_HDR_SG_MOD_OFF 24 349 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) 350 /* dw3 */ 351 #define CMD_HDR_IPTT_OFF 0 352 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) 353 /* dw6 */ 354 #define CMD_HDR_DIF_SGL_LEN_OFF 0 355 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) 356 #define CMD_HDR_DATA_SGL_LEN_OFF 16 357 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) 358 /* dw7 */ 359 #define CMD_HDR_ADDR_MODE_SEL_OFF 15 360 #define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF) 361 #define CMD_HDR_ABORT_IPTT_OFF 16 362 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF) 363 364 /* Completion header */ 365 /* dw0 */ 366 #define CMPLT_HDR_CMPLT_OFF 0 367 #define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF) 368 #define CMPLT_HDR_ERROR_PHASE_OFF 2 369 #define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF) 370 #define CMPLT_HDR_RSPNS_XFRD_OFF 10 371 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) 372 #define CMPLT_HDR_ERX_OFF 12 373 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) 374 #define CMPLT_HDR_ABORT_STAT_OFF 13 375 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF) 376 /* abort_stat */ 377 #define STAT_IO_NOT_VALID 0x1 378 #define STAT_IO_NO_DEVICE 0x2 379 #define STAT_IO_COMPLETE 0x3 380 #define STAT_IO_ABORTED 0x4 381 /* dw1 */ 382 #define CMPLT_HDR_IPTT_OFF 0 383 #define 
CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) 384 #define CMPLT_HDR_DEV_ID_OFF 16 385 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) 386 /* dw3 */ 387 #define CMPLT_HDR_IO_IN_TARGET_OFF 17 388 #define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF) 389 390 /* ITCT header */ 391 /* qw0 */ 392 #define ITCT_HDR_DEV_TYPE_OFF 0 393 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF) 394 #define ITCT_HDR_VALID_OFF 2 395 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF) 396 #define ITCT_HDR_MCR_OFF 5 397 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) 398 #define ITCT_HDR_VLN_OFF 9 399 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) 400 #define ITCT_HDR_SMP_TIMEOUT_OFF 16 401 #define ITCT_HDR_AWT_CONTINUE_OFF 25 402 #define ITCT_HDR_PORT_ID_OFF 28 403 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) 404 /* qw2 */ 405 #define ITCT_HDR_INLT_OFF 0 406 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF) 407 #define ITCT_HDR_RTOLT_OFF 48 408 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) 409 410 struct hisi_sas_protect_iu_v3_hw { 411 u32 dw0; 412 u32 lbrtcv; 413 u32 lbrtgv; 414 u32 dw3; 415 u32 dw4; 416 u32 dw5; 417 u32 rsv; 418 }; 419 420 struct hisi_sas_complete_v3_hdr { 421 __le32 dw0; 422 __le32 dw1; 423 __le32 act; 424 __le32 dw3; 425 }; 426 427 struct hisi_sas_err_record_v3 { 428 /* dw0 */ 429 __le32 trans_tx_fail_type; 430 431 /* dw1 */ 432 __le32 trans_rx_fail_type; 433 434 /* dw2 */ 435 __le16 dma_tx_err_type; 436 __le16 sipc_rx_err_type; 437 438 /* dw3 */ 439 __le32 dma_rx_err_type; 440 }; 441 442 #define RX_DATA_LEN_UNDERFLOW_OFF 6 443 #define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF) 444 445 #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096 446 #define HISI_SAS_MSI_COUNT_V3_HW 32 447 448 #define DIR_NO_DATA 0 449 #define DIR_TO_INI 1 450 #define DIR_TO_DEVICE 2 451 #define DIR_RESERVED 3 452 453 #define FIS_CMD_IS_UNCONSTRAINED(fis) \ 454 ((fis.command == 
ATA_CMD_READ_LOG_EXT) || \ 455 (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \ 456 ((fis.command == ATA_CMD_DEV_RESET) && \ 457 ((fis.control & ATA_SRST) != 0))) 458 459 #define T10_INSRT_EN_OFF 0 460 #define T10_INSRT_EN_MSK (1 << T10_INSRT_EN_OFF) 461 #define T10_RMV_EN_OFF 1 462 #define T10_RMV_EN_MSK (1 << T10_RMV_EN_OFF) 463 #define T10_RPLC_EN_OFF 2 464 #define T10_RPLC_EN_MSK (1 << T10_RPLC_EN_OFF) 465 #define T10_CHK_EN_OFF 3 466 #define T10_CHK_EN_MSK (1 << T10_CHK_EN_OFF) 467 #define INCR_LBRT_OFF 5 468 #define INCR_LBRT_MSK (1 << INCR_LBRT_OFF) 469 #define USR_DATA_BLOCK_SZ_OFF 20 470 #define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF) 471 #define T10_CHK_MSK_OFF 16 472 #define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF) 473 #define T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF) 474 475 #define BASE_VECTORS_V3_HW 16 476 #define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1) 477 478 enum { 479 DSM_FUNC_ERR_HANDLE_MSI = 0, 480 }; 481 482 static bool hisi_sas_intr_conv; 483 MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)"); 484 485 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ 486 static int prot_mask; 487 module_param(prot_mask, int, 0); 488 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 "); 489 490 static bool auto_affine_msi_experimental; 491 module_param(auto_affine_msi_experimental, bool, 0444); 492 MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs as experimental:\n" 493 "default is off"); 494 495 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) 496 { 497 void __iomem *regs = hisi_hba->regs + off; 498 499 return readl(regs); 500 } 501 502 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) 503 { 504 void __iomem *regs = hisi_hba->regs + off; 505 506 writel(val, regs); 507 } 508 509 static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, 510 u32 off, u32 val) 511 { 512 void __iomem *regs = 
hisi_hba->regs + (0x400 * phy_no) + off; 513 514 writel(val, regs); 515 } 516 517 static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, 518 int phy_no, u32 off) 519 { 520 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; 521 522 return readl(regs); 523 } 524 525 #define hisi_sas_read32_poll_timeout(off, val, cond, delay_us, \ 526 timeout_us) \ 527 ({ \ 528 void __iomem *regs = hisi_hba->regs + off; \ 529 readl_poll_timeout(regs, val, cond, delay_us, timeout_us); \ 530 }) 531 532 #define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us, \ 533 timeout_us) \ 534 ({ \ 535 void __iomem *regs = hisi_hba->regs + off; \ 536 readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\ 537 }) 538 539 static void init_reg_v3_hw(struct hisi_hba *hisi_hba) 540 { 541 int i; 542 543 /* Global registers init */ 544 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 545 (u32)((1ULL << hisi_hba->queue_count) - 1)); 546 hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0); 547 hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400); 548 hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); 549 hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); 550 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); 551 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); 552 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); 553 hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN, 554 hisi_sas_intr_conv); 555 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); 556 hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); 557 hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); 558 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); 559 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe); 560 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe); 561 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff); 562 hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); 563 hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0); 564 hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0); 565 
hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555); 566 hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0); 567 hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0); 568 for (i = 0; i < hisi_hba->queue_count; i++) 569 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); 570 571 hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); 572 573 for (i = 0; i < hisi_hba->n_phy; i++) { 574 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 575 struct asd_sas_phy *sas_phy = &phy->sas_phy; 576 u32 prog_phy_link_rate = 0x800; 577 578 if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate < 579 SAS_LINK_RATE_1_5_GBPS)) { 580 prog_phy_link_rate = 0x855; 581 } else { 582 enum sas_linkrate max = sas_phy->phy->maximum_linkrate; 583 584 prog_phy_link_rate = 585 hisi_sas_get_prog_phy_linkrate_mask(max) | 586 0x800; 587 } 588 hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 589 prog_phy_link_rate); 590 hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00); 591 hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80); 592 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); 593 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); 594 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); 595 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); 596 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff); 597 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe); 598 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); 599 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); 600 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); 601 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); 602 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); 603 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); 604 hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); 605 hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); 606 hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32); 
607 hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME, 608 0x30f4240); 609 /* used for 12G negotiate */ 610 hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); 611 hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff); 612 } 613 614 for (i = 0; i < hisi_hba->queue_count; i++) { 615 /* Delivery queue */ 616 hisi_sas_write32(hisi_hba, 617 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), 618 upper_32_bits(hisi_hba->cmd_hdr_dma[i])); 619 620 hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), 621 lower_32_bits(hisi_hba->cmd_hdr_dma[i])); 622 623 hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), 624 HISI_SAS_QUEUE_SLOTS); 625 626 /* Completion queue */ 627 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), 628 upper_32_bits(hisi_hba->complete_hdr_dma[i])); 629 630 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), 631 lower_32_bits(hisi_hba->complete_hdr_dma[i])); 632 633 hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), 634 HISI_SAS_QUEUE_SLOTS); 635 } 636 637 /* itct */ 638 hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, 639 lower_32_bits(hisi_hba->itct_dma)); 640 641 hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, 642 upper_32_bits(hisi_hba->itct_dma)); 643 644 /* iost */ 645 hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, 646 lower_32_bits(hisi_hba->iost_dma)); 647 648 hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, 649 upper_32_bits(hisi_hba->iost_dma)); 650 651 /* breakpoint */ 652 hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO, 653 lower_32_bits(hisi_hba->breakpoint_dma)); 654 655 hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI, 656 upper_32_bits(hisi_hba->breakpoint_dma)); 657 658 /* SATA broken msg */ 659 hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO, 660 lower_32_bits(hisi_hba->sata_breakpoint_dma)); 661 662 hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI, 663 upper_32_bits(hisi_hba->sata_breakpoint_dma)); 664 665 /* SATA initial fis */ 666 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO, 667 
lower_32_bits(hisi_hba->initial_fis_dma)); 668 669 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, 670 upper_32_bits(hisi_hba->initial_fis_dma)); 671 672 /* RAS registers init */ 673 hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0); 674 hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0); 675 hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0); 676 hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0); 677 678 /* LED registers init */ 679 hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff); 680 hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080); 681 hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080); 682 /* Configure blink generator rate A to 1Hz and B to 4Hz */ 683 hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700); 684 hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000); 685 } 686 687 static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 688 { 689 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 690 691 cfg &= ~PHY_CFG_DC_OPT_MSK; 692 cfg |= 1 << PHY_CFG_DC_OPT_OFF; 693 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 694 } 695 696 static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 697 { 698 struct sas_identify_frame identify_frame; 699 u32 *identify_buffer; 700 701 memset(&identify_frame, 0, sizeof(identify_frame)); 702 identify_frame.dev_type = SAS_END_DEVICE; 703 identify_frame.frame_type = 0; 704 identify_frame._un1 = 1; 705 identify_frame.initiator_bits = SAS_PROTOCOL_ALL; 706 identify_frame.target_bits = SAS_PROTOCOL_NONE; 707 memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); 708 memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); 709 identify_frame.phy_id = phy_no; 710 identify_buffer = (u32 *)(&identify_frame); 711 712 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, 713 __swab32(identify_buffer[0])); 714 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, 715 __swab32(identify_buffer[1])); 716 
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, 717 __swab32(identify_buffer[2])); 718 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, 719 __swab32(identify_buffer[3])); 720 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, 721 __swab32(identify_buffer[4])); 722 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, 723 __swab32(identify_buffer[5])); 724 } 725 726 static void setup_itct_v3_hw(struct hisi_hba *hisi_hba, 727 struct hisi_sas_device *sas_dev) 728 { 729 struct domain_device *device = sas_dev->sas_device; 730 struct device *dev = hisi_hba->dev; 731 u64 qw0, device_id = sas_dev->device_id; 732 struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; 733 struct domain_device *parent_dev = device->parent; 734 struct asd_sas_port *sas_port = device->port; 735 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 736 u64 sas_addr; 737 738 memset(itct, 0, sizeof(*itct)); 739 740 /* qw0 */ 741 qw0 = 0; 742 switch (sas_dev->dev_type) { 743 case SAS_END_DEVICE: 744 case SAS_EDGE_EXPANDER_DEVICE: 745 case SAS_FANOUT_EXPANDER_DEVICE: 746 qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; 747 break; 748 case SAS_SATA_DEV: 749 case SAS_SATA_PENDING: 750 if (parent_dev && dev_is_expander(parent_dev->dev_type)) 751 qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; 752 else 753 qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF; 754 break; 755 default: 756 dev_warn(dev, "setup itct: unsupported dev type (%d)\n", 757 sas_dev->dev_type); 758 } 759 760 qw0 |= ((1 << ITCT_HDR_VALID_OFF) | 761 (device->linkrate << ITCT_HDR_MCR_OFF) | 762 (1 << ITCT_HDR_VLN_OFF) | 763 (0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) | 764 (1 << ITCT_HDR_AWT_CONTINUE_OFF) | 765 (port->id << ITCT_HDR_PORT_ID_OFF)); 766 itct->qw0 = cpu_to_le64(qw0); 767 768 /* qw1 */ 769 memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE); 770 itct->sas_addr = cpu_to_le64(__swab64(sas_addr)); 771 772 /* qw2 */ 773 if (!dev_is_sata(device)) 774 itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) | 775 
(0x1ULL << ITCT_HDR_RTOLT_OFF)); 776 } 777 778 static void clear_itct_v3_hw(struct hisi_hba *hisi_hba, 779 struct hisi_sas_device *sas_dev) 780 { 781 DECLARE_COMPLETION_ONSTACK(completion); 782 u64 dev_id = sas_dev->device_id; 783 struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; 784 u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 785 786 sas_dev->completion = &completion; 787 788 /* clear the itct interrupt state */ 789 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) 790 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 791 ENT_INT_SRC3_ITC_INT_MSK); 792 793 /* clear the itct table */ 794 reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); 795 hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); 796 797 wait_for_completion(sas_dev->completion); 798 memset(itct, 0, sizeof(struct hisi_sas_itct)); 799 } 800 801 static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, 802 struct domain_device *device) 803 { 804 struct hisi_sas_slot *slot, *slot2; 805 struct hisi_sas_device *sas_dev = device->lldd_dev; 806 u32 cfg_abt_set_query_iptt; 807 808 cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba, 809 CFG_ABT_SET_QUERY_IPTT); 810 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) { 811 cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK; 812 cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) | 813 (slot->idx << CFG_SET_ABORTED_IPTT_OFF); 814 hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, 815 cfg_abt_set_query_iptt); 816 } 817 cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF); 818 hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, 819 cfg_abt_set_query_iptt); 820 hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE, 821 1 << CFG_ABT_SET_IPTT_DONE_OFF); 822 } 823 824 static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) 825 { 826 struct device *dev = hisi_hba->dev; 827 int ret; 828 u32 val; 829 830 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); 831 832 /* Disable all of the PHYs */ 833 hisi_sas_stop_phys(hisi_hba); 834 udelay(50); 835 836 /* Ensure axi bus idle 
*/ 837 ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val, 838 20000, 1000000); 839 if (ret) { 840 dev_err(dev, "axi bus is not idle, ret = %d!\n", ret); 841 return -EIO; 842 } 843 844 if (ACPI_HANDLE(dev)) { 845 acpi_status s; 846 847 s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); 848 if (ACPI_FAILURE(s)) { 849 dev_err(dev, "Reset failed\n"); 850 return -EIO; 851 } 852 } else { 853 dev_err(dev, "no reset method!\n"); 854 return -EINVAL; 855 } 856 857 return 0; 858 } 859 860 static int hw_init_v3_hw(struct hisi_hba *hisi_hba) 861 { 862 struct device *dev = hisi_hba->dev; 863 union acpi_object *obj; 864 guid_t guid; 865 int rc; 866 867 rc = reset_hw_v3_hw(hisi_hba); 868 if (rc) { 869 dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc); 870 return rc; 871 } 872 873 msleep(100); 874 init_reg_v3_hw(hisi_hba); 875 876 if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) { 877 dev_err(dev, "Parse GUID failed\n"); 878 return -EINVAL; 879 } 880 881 /* Switch over to MSI handling , from PCI AER default */ 882 obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0, 883 DSM_FUNC_ERR_HANDLE_MSI, NULL); 884 if (!obj) 885 dev_warn(dev, "Switch over to MSI handling failed\n"); 886 else 887 ACPI_FREE(obj); 888 889 return 0; 890 } 891 892 static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 893 { 894 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 895 896 cfg |= PHY_CFG_ENA_MSK; 897 cfg &= ~PHY_CFG_PHY_RST_MSK; 898 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 899 } 900 901 static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 902 { 903 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 904 u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); 905 static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) | 906 BIT(CHL_INT2_RX_CODE_ERR_OFF) | 907 BIT(CHL_INT2_RX_INVLD_DW_OFF); 908 u32 state; 909 910 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk); 911 912 cfg &= 
	       ~PHY_CFG_ENA_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);

	mdelay(50);

	state = hisi_sas_read32(hisi_hba, PHY_STATE);
	/* if the PHY still reports ready after 50ms, assert its reset bit */
	if (state & BIT(phy_no)) {
		cfg |= PHY_CFG_PHY_RST_MSK;
		hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
	}

	udelay(1);

	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk);
}

/* Bring up a PHY: program its ID frame and optional mode, then enable it. */
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v3_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
	enable_phy_v3_hw(hisi_hba, phy_no);
}

/*
 * Hard-reset a PHY: disable it; for an attached SAS end device also
 * request a link hard reset via TXID_AUTO; wait 100ms; re-enable.
 */
static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 txid_auto;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	if (phy->identify.device_type == SAS_END_DEVICE) {
		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
				     txid_auto | TX_HARDRST_MSK);
	}
	msleep(100);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}

/* v3 hardware tops out at 12G. */
static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
{
	return SAS_LINK_RATE_12_0_GBPS;
}

/* Enable every PHY that has not been administratively disabled. */
static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (!sas_phy->phy->enabled)
			continue;

		hisi_sas_phy_enable(hisi_hba, i, 1);
	}
}

/*
 * Pulse the SL_CONTROL notify-enable bit for ~1ms so the link layer
 * sends NOTIFY primitives on this PHY.
 */
static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 sl_control;

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control |=
		      SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
	msleep(1);
	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}

/*
 * Build a bitmap of the PHYs that are up and attached to @port_id.
 * PHY_PORT_NUM_MA packs one 4-bit port id per PHY; PHY_STATE holds one
 * ready bit per PHY.
 */
static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
{
	int i, bitmap = 0;
	u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	for (i = 0; i < hisi_hba->n_phy; i++)
		if (phy_state & BIT(i))
			if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
				bitmap |= BIT(i);

	return bitmap;
}

/*
 * Ring the doorbell for delivery queue @dq: consume the list of ready
 * slots (stopping at the first not-ready one) and advance the hardware
 * write pointer to just past the last ready slot.
 */
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct hisi_sas_slot *s, *s1, *s2 = NULL;
	int dlvry_queue = dq->id;
	int wp;

	list_for_each_entry_safe(s, s1, &dq->list, delivery) {
		if (!s->ready)
			break;
		s2 = s;
		list_del(&s->delivery);
	}

	if (!s2)
		return;

	/*
	 * Ensure that memories for slots built on other CPUs is observed.
	 */
	smp_rmb();
	wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;

	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}

/*
 * Fill the slot's SGE page from @scatter, point the command header's
 * PRD table at it and record the element count in hdr->sg_len.
 */
static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
			       struct hisi_sas_slot *slot,
			       struct hisi_sas_cmd_hdr *hdr,
			       struct scatterlist *scatter,
			       int n_elem)
{
	struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
	struct scatterlist *sg;
	int i;

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));

	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
}

/*
 * As prep_prd_sge_v3_hw(), but for the DIF (protection information)
 * scatterlist: uses the slot's DIF SGE page and the DIF PRD pointer.
 */
static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot,
				   struct hisi_sas_cmd_hdr *hdr,
				   struct scatterlist *scatter,
				   int n_elem)
{
	struct hisi_sas_sge_dif_page *sge_dif_page;
	struct scatterlist *sg;
	int i;

	sge_dif_page = hisi_sas_sge_dif_addr_mem(slot);

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_dif_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = 0;
		entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->dif_prd_table_addr =
		cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot));

	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF);
}

/*
 * Derive the T10 PI tag-check mask from the command's protection
 * flags.  NOTE(review): the returned bits appear to be "mask off this
 * check" controls — when REF checking is requested only the APP-tag
 * bit is returned (so REF stays checked); otherwise both REF and APP
 * are masked.  Confirm polarity against the hardware spec before
 * changing.
 */
static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
{
	unsigned char prot_flags = scsi_cmnd->prot_flags;

	if (prot_flags & SCSI_PROT_REF_CHECK)
		return T10_CHK_APP_TAG_MSK;
	return T10_CHK_REF_TAG_MSK |
	       T10_CHK_APP_TAG_MSK;
}

/*
 * Populate the protection information IU for a PI-enabled command:
 * insert/strip/check enables chosen per protection op, logical block
 * reference tag seeds, and the user data block size (512/520/4096).
 */
static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
			    struct hisi_sas_protect_iu_v3_hw *prot)
{
	unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
	unsigned int interval = scsi_prot_interval(scsi_cmnd);
	u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);

	switch (prot_op) {
	case SCSI_PROT_READ_INSERT:
		prot->dw0 |= T10_INSRT_EN_MSK;
		prot->lbrtgv = lbrt_chk_val;	/* generation seed */
		break;
	case SCSI_PROT_READ_STRIP:
		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
		prot->lbrtcv = lbrt_chk_val;	/* check seed */
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	case SCSI_PROT_READ_PASS:
		prot->dw0 |= T10_CHK_EN_MSK;
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	case SCSI_PROT_WRITE_INSERT:
		prot->dw0 |= T10_INSRT_EN_MSK;
		prot->lbrtgv = lbrt_chk_val;
		break;
	case SCSI_PROT_WRITE_STRIP:
		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
		prot->lbrtcv = lbrt_chk_val;
		break;
	case SCSI_PROT_WRITE_PASS:
		prot->dw0 |= T10_CHK_EN_MSK;
		prot->lbrtcv = lbrt_chk_val;
		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
		break;
	default:
		WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
		break;
	}

	switch (interval) {
	case 512:
		break;	/* 512 is the hardware default block size */
	case 4096:
		prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
		break;
	case 520:
		prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
		break;
	default:
		WARN(1, "protection interval (0x%x) invalid\n",
		     interval);
		break;
	}

	prot->dw0 |= INCR_LBRT_MSK;
}

/*
 * Build the command header and SSP frame for an SSP command or TMF:
 * direction/frame type, SGL (and DIF SGL when protection info is
 * present), CDB or TMF IU copy, and the transfer length.
 */
static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct
	       hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	struct hisi_sas_tmf_task *tmf = slot->tmf;
	int has_data = 0, priority = !!tmf;
	unsigned char prot_op;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0, len = 0;

	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (tmf) {
		/*
		 * TMF frame, no data.  prot_op stays unassigned here but is
		 * only read below when has_data is set, which never holds on
		 * this path.
		 */
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		prot_op = scsi_get_prot_op(scsi_cmnd);
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	/* command frame length in dwords (rounded up), response buffer size */
	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
	      + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
				   slot->n_elem);

		if (scsi_prot_sg_count(scsi_cmnd))
			prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr,
					       scsi_prot_sglist(scsi_cmnd),
					       slot->n_elem_dif);
	}

	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	/* the SSP IU sits just past the frame header in the command table */
	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
		sizeof(struct ssp_frame_hdr);

	memcpy(buf_cmd,
	       &task->ssp_task.LUN, 8);
	if (!tmf) {
		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* big-endian tag of the task being managed */
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
		struct hisi_sas_protect_iu_v3_hw prot;
		u8 *buf_cmd_prot;

		hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
		dw1 |= CMD_HDR_PIR_MSK;
		/* protection IU follows the command IU in the command table */
		buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
			       sizeof(struct ssp_frame_hdr) +
			       sizeof(struct ssp_command_iu);

		memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
		fill_prot_v3_hw(scsi_cmnd, &prot);
		memcpy(buf_cmd_prot, &prot,
		       sizeof(struct hisi_sas_protect_iu_v3_hw));
		/*
		 * For READ, we need length of info read to memory, while for
		 * WRITE we need length of data written to the disk.
		 */
		if (prot_op == SCSI_PROT_WRITE_INSERT ||
		    prot_op == SCSI_PROT_READ_INSERT ||
		    prot_op == SCSI_PROT_WRITE_PASS ||
		    prot_op == SCSI_PROT_READ_PASS) {
			unsigned int interval = scsi_prot_interval(scsi_cmnd);
			unsigned int ilog2_interval = ilog2(interval);

			/* 8 bytes of PI per protection interval */
			len = (task->total_xfer_len >> ilog2_interval) * 8;
		}
	}

	hdr->dw1 = cpu_to_le32(dw1);

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
}

/*
 * Build the command header for an SMP request.  The request frame is
 * already DMA-mapped and is used directly as the command table; the
 * response lands in the slot's status buffer.
 */
static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len;

	/* req */
	sg_req = &task->smp_task.smp_req;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2: frame length excludes the 4-byte CRC */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

}

/*
 * Build the command header and host-to-device FIS for a SATA/STP task.
 */
static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device =
				     task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u8 *buf_cmd;
	int has_data = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	/* cmd 3 = STP (behind an expander), cmd 4 = direct-attached SATA */
	if (parent_dev && dev_is_expander(parent_dev->dev_type))
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	else
		hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);

	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	/* flag a software-reset request (SRST device reset) */
	if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
	    (task->ata_task.fis.control & ATA_SRST))
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (hisi_sas_get_ata_protocol(
		&task->ata_task.fis, task->data_dir))
		<< CMD_HDR_FRAME_TYPE_OFF;
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
		dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;

	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2 */
	if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
		/* NCQ tag also travels in the FIS sector-count field */
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
			2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data)
		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
				   slot->n_elem);

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr =
			      cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
}

/*
 * Build an internal abort command header.  @abort_flag selects single
 * command vs whole device; @tag_to_abort is the IPTT of the command to
 * be aborted.
 */
static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	struct sas_task *task = slot->task;
	struct domain_device *dev = task->dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct hisi_sas_port *port = slot->port;

	/* dw0 */
	hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/
			       (port->id << CMD_HDR_PORT_OFF) |
				   (dev_is_sata(dev)
					<< CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
					(abort_flag
					 << CMD_HDR_ABORT_FLAG_OFF));

	/* dw1 */
	hdr->dw1 = cpu_to_le32(device_id
			<< CMD_HDR_DEV_ID_OFF);

	/* dw7 */
	hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

}

/*
 * PHY-up interrupt: read the negotiated port id and link rate, capture
 * the identify frame (SAS) or initial D2H FIS (SATA) and notify libsas.
 * Returns IRQ_HANDLED when the event was consumed, IRQ_NONE otherwise.
 */
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i;
	irqreturn_t res;
	u32 context, port_id, link_rate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;
	unsigned long flags;

	del_timer(&phy->timer);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);

	/* both registers pack one 4-bit field per PHY */
	port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	port_id = (port_id >> (4 * phy_no)) & 0xf;
	link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
	link_rate = (link_rate >> (phy_no * 4)) & 0xf;

	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		res
		    = IRQ_NONE;
		goto end;
	}
	sas_phy->linkrate = link_rate;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);

	/* Check for SATA dev */
	context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
	if (context & (1 << phy_no)) {
		struct hisi_sas_initial_fis *initial_fis;
		struct dev_to_host_fis *fis;
		u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
		struct Scsi_Host *shost = hisi_hba->shost;

		dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
		initial_fis = &hisi_hba->initial_fis[phy_no];
		fis = &initial_fis->fis;

		/* check ERR bit of Status Register */
		if (fis->status & ATA_ERR) {
			dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
				 phy_no, fis->status);
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
			res = IRQ_NONE;
			goto end;
		}

		sas_phy->oob_mode = SATA_OOB_MODE;
		/* synthesise a SAS address for the SATA device */
		attached_sas_addr[0] = 0x50;
		attached_sas_addr[6] = shost->host_no;
		attached_sas_addr[7] = phy_no;
		memcpy(sas_phy->attached_sas_addr,
		       attached_sas_addr,
		       SAS_ADDR_SIZE);
		memcpy(sas_phy->frame_rcvd, fis,
		       sizeof(struct dev_to_host_fis));
		phy->phy_type |= PORT_TYPE_SATA;
		phy->identify.device_type = SAS_SATA_DEV;
		phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
		phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	} else {
		u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
		struct sas_identify_frame *id =
			(struct sas_identify_frame *)frame_rcvd;

		dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
		/* copy the received IDENTIFY address frame, byte-swapped */
		for (i = 0; i < 6; i++) {
			u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
					       RX_IDAF_DWORD0 + (i * 4));
			frame_rcvd[i] = __swab32(idaf);
		}
		sas_phy->oob_mode = SAS_OOB_MODE;
		memcpy(sas_phy->attached_sas_addr,
		       &id->sas_addr,
		       SAS_ADDR_SIZE);
		phy->phy_type |= PORT_TYPE_SAS;
		phy->identify.device_type = id->dev_type;
		phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
		if (phy->identify.device_type == SAS_END_DEVICE)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SSP;
		else if (phy->identify.device_type != SAS_PHY_UNUSED)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SMP;
	}

	phy->port_id = port_id;
	phy->phy_attached = 1;
	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
	res = IRQ_HANDLED;
	/* wake anyone waiting on a PHY reset to finish */
	spin_lock_irqsave(&phy->lock, flags);
	if (phy->reset_completion) {
		phy->in_reset = 0;
		complete(phy->reset_completion);
	}
	spin_unlock_irqrestore(&phy->lock, flags);
end:
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}

/*
 * PHY-down interrupt: report the drop to libsas, clear SL_CTA, set CT3
 * in TXID_AUTO, and acknowledge the not-ready interrupt.
 */
static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 phy_state, sl_ctrl, txid_auto;
	struct device *dev = hisi_hba->dev;

	del_timer(&phy->timer);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ?
			  1 : 0);

	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
			     sl_ctrl&(~SL_CTA_MSK));

	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
			     txid_auto | CT3_MSK);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return IRQ_HANDLED;
}

/*
 * Broadcast-primitive interrupt: forward a BROADCAST(CHANGE) to libsas
 * unless a host reset is in flight, then acknowledge the interrupt.
 */
static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	u32 bcast_status;

	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
	bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
	if ((bcast_status & RX_BCAST_CHG_MSK) &&
	    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_RX_BCST_ACK_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);

	return IRQ_HANDLED;
}

/*
 * PHY event interrupt entry.  CHNL_INT_STATUS packs 4 bits per PHY;
 * bit 0 of each nibble (mask 0x11111111) is the up/down/bcast summary.
 * Dispatch per PHY according to CHL_INT0 and the current ready state.
 */
static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_NONE;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0x11111111;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);
			u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
			int rdy = phy_state & (1 << phy_no);

			if (rdy) {
				if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
					/* phy up */
					if (phy_up_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
				if (irq_value &
				    CHL_INT0_SL_RX_BCST_ACK_MSK)
					/* phy bcast */
					if (phy_bcast_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
			} else {
				if (irq_value & CHL_INT0_NOT_RDY_MSK)
					/* phy down */
					if (phy_down_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
			}
		}
		irq_msk >>= 4;
		phy_no++;
	}

	return res;
}

/* CHL_INT1 per-port DMA/AXI error bits and their log messages. */
static const struct hisi_sas_hw_error port_axi_error[] = {
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF),
		.msg = "dmac_tx_ecc_bad_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF),
		.msg = "dmac_rx_ecc_bad_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
		.msg = "dma_tx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
		.msg = "dma_tx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
		.msg = "dma_rx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
		.msg = "dma_rx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF),
		.msg = "dma_tx_fifo_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF),
		.msg = "dma_rx_fifo_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF),
		.msg = "dma_tx_axi_ruser_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF),
		.msg = "dma_rx_axi_ruser_err",
	},
};

/*
 * CHL_INT1 (per-port DMA/AXI error) handler: log each unmasked error,
 * schedule a controller reset, then acknowledge the handled bits.
 */
static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1);
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK);
	struct device *dev = hisi_hba->dev;
	int i;

	irq_value &= ~irq_msk;
	if (!irq_value)
		return;

	for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
		const struct hisi_sas_hw_error *error = &port_axi_error[i];

		if (!(irq_value &
		      error->irq_msk))
			continue;

		dev_err(dev, "%s error (phy%d 0x%x) found!\n",
			error->msg, phy_no, irq_value);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
}

/*
 * Accumulate this PHY's hardware error counters into the sas_phy /
 * hisi_sas_phy statistics under the phy lock.
 * NOTE(review): each counter register is read once and added —
 * presumably clear-on-read; confirm against the HW spec.
 */
static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;
	u32 reg_value;

	spin_lock_irqsave(&phy->lock, flags);

	/* loss dword sync */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
	sphy->loss_of_dword_sync_count += reg_value;

	/* phy reset problem */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
	sphy->phy_reset_problem_count += reg_value;

	/* invalid dword */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	sphy->invalid_dword_count += reg_value;

	/* disparity err */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	sphy->running_disparity_error_count += reg_value;

	/* code violation error */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
	phy->code_violation_err_count += reg_value;

	spin_unlock_irqrestore(&phy->lock, flags);
}

/*
 * CHL_INT2 handler for one PHY: identify timeout, STP link timeout and
 * the RX error bits, with revision-specific handling of the latter.
 */
static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
	u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct pci_dev *pci_dev = hisi_hba->pci_dev;
	struct device *dev = hisi_hba->dev;
	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
			       BIT(CHL_INT2_RX_CODE_ERR_OFF) |
			       BIT(CHL_INT2_RX_INVLD_DW_OFF);

	irq_value &= ~irq_msk;
	if
	   (!irq_value)
		return;

	if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
		dev_warn(dev, "phy%d identify timeout\n", phy_no);
		hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
		u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
				STP_LINK_TIMEOUT_STATE);

		dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
			 phy_no, reg_value);
		if (reg_value & BIT(4))
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	/* revisions after 0x20: just log the updated RX error counters */
	if (pci_dev->revision > 0x20 && (irq_value & msk)) {
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct sas_phy *sphy = sas_phy->phy;

		phy_get_events_v3_hw(hisi_hba, phy_no);

		if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF))
			dev_info(dev, "phy%d invalid dword cnt: %u\n", phy_no,
				 sphy->invalid_dword_count);

		if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF))
			dev_info(dev, "phy%d code violation cnt: %u\n", phy_no,
				 phy->code_violation_err_count);

		if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF))
			dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no,
				 sphy->running_disparity_error_count);
	}

	/* revision 0x20: poll HILINK state and link-reset if it stays bad */
	if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
	    (pci_dev->revision == 0x20)) {
		u32 reg_value;
		int rc;

		rc = hisi_sas_read32_poll_timeout_atomic(
			HILINK_ERR_DFX, reg_value,
			!((reg_value >> 8) & BIT(phy_no)),
			1000, 10000);
		if (rc)
			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
	}

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
}

/*
 * CHL_INT0 handler for the bits not owned by the up/down/bcast handler:
 * report OOB-ready, then acknowledge everything except the up/down/
 * bcast bits (those are acked by their own handlers).
 */
static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0);

	if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
		hisi_sas_phy_oob_ready(hisi_hba, phy_no);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     irq_value0 &
			     (~CHL_INT0_SL_RX_BCST_ACK_MSK)
			     & (~CHL_INT0_SL_PHY_ENABLE_MSK)
			     & (~CHL_INT0_NOT_RDY_MSK));
}

/*
 * Channel interrupt entry: bits 1-3 of each CHNL_INT_STATUS nibble
 * (mask 0xeeeeeeee) summarise CHL_INT0/1/2 per PHY; dispatch to the
 * per-register handlers.
 */
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
				& 0xeeeeeeee;

	while (irq_msk) {
		if (irq_msk & (2 << (phy_no * 4)))
			handle_chl_int0_v3_hw(hisi_hba, phy_no);

		if (irq_msk & (4 << (phy_no * 4)))
			handle_chl_int1_v3_hw(hisi_hba, phy_no);

		if (irq_msk & (8 << (phy_no * 4)))
			handle_chl_int2_v3_hw(hisi_hba, phy_no);

		irq_msk &= ~(0xe << (phy_no * 4));
		phy_no++;
	}

	return IRQ_HANDLED;
}

/*
 * Multi-bit (uncorrectable) ECC error descriptors: the SAS_ECC_INTR
 * bit, plus the status register/field holding the failing memory
 * address for the log message.
 */
static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
	{
		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
		.msk = HGC_DQE_ECC_MB_ADDR_MSK,
		.shift = HGC_DQE_ECC_MB_ADDR_OFF,
		.msg = "hgc_dqe_eccbad_intr",
		.reg = HGC_DQE_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
		.msk = HGC_IOST_ECC_MB_ADDR_MSK,
		.shift = HGC_IOST_ECC_MB_ADDR_OFF,
		.msg = "hgc_iost_eccbad_intr",
		.reg = HGC_IOST_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
		.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
		.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
		.msg = "hgc_itct_eccbad_intr",
		.reg = HGC_ITCT_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
		.msg = "hgc_iostl_eccbad_intr",
		.reg = HGC_LM_DFX_STATUS2,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
		.msg = "hgc_itctl_eccbad_intr",
		.reg = HGC_LM_DFX_STATUS2,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
		.msk = HGC_CQE_ECC_MB_ADDR_MSK,
		.shift = HGC_CQE_ECC_MB_ADDR_OFF,
		.msg = "hgc_cqe_eccbad_intr",
		.reg = HGC_CQE_ECC_ADDR,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
		.msg = "rxm_mem0_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
		.msg = "rxm_mem1_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
		.msg = "rxm_mem2_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS14,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
		.msg = "rxm_mem3_eccbad_intr",
		.reg = HGC_RXM_DFX_STATUS15,
	},
	{
		.irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
		.msk = AM_ROB_ECC_ERR_ADDR_MSK,
		.shift = AM_ROB_ECC_ERR_ADDR_OFF,
		.msg = "ooo_ram_eccbad_intr",
		.reg = AM_ROB_ECC_ERR_ADDR,
	},
};

/*
 * Log every multi-bit ECC error flagged in @irq_value — with the
 * failing memory address extracted from the matching status register —
 * and schedule a controller reset for each.
 */
static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
		u32 irq_value)
{
	struct device *dev = hisi_hba->dev;
	const struct hisi_sas_hw_error *ecc_error;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
		ecc_error = &multi_bit_ecc_errors[i];
		if (irq_value & ecc_error->irq_msk) {
			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
			val &= ecc_error->msk;
			val >>= ecc_error->shift;
			dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n",
				ecc_error->msg, irq_value, val);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}
	}
}

/*
 * ECC interrupt service: mask the ECC sources, process any pending
 * multi-bit errors, acknowledge them, then restore the mask.
 */
static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
{
	u32
irq_value, irq_msk;	/* completes the declaration begun on the previous line */

	/*
	 * Mask all ECC interrupt sources while they are serviced; the
	 * previous mask is restored on exit.
	 */
	irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);

	irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
	if (irq_value)
		multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value);

	/* Ack the handled sources, then restore the original mask. */
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
}

/* Per-bit decode of HGC_AXI_FIFO_ERR_INFO: AXI bus read/write errors. */
static const struct hisi_sas_hw_error axi_error[] = {
	{ .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
	{ .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
	{ .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
	{ .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
	{ .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
	{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
	{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
	{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
	{}	/* sentinel: terminates the sub-table walk */
};

/* Per-bit decode of HGC_AXI_FIFO_ERR_INFO: internal FIFO errors. */
static const struct hisi_sas_hw_error fifo_error[] = {
	{ .msk = BIT(8), .msg = "CQE_WINFO_FIFO" },
	{ .msk = BIT(9), .msg = "CQE_MSG_FIFIO" },
	{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
	{ .msk = BIT(11), .msg = "CMDP_FIFO" },
	{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
	{}	/* sentinel */
};

/*
 * Fatal sources reported through ENT_INT_SRC3.  Entries with a ->sub
 * table decode further per-bit detail from register ->reg; the others
 * log ->msg directly.
 */
static const struct hisi_sas_hw_error fatal_axi_error[] = {
	{
		.irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
		.msg = "write pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
		.msg = "iptt no match slot",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
		.msg = "read pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = axi_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = fifo_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
		.msg = "LM add/fetch list",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
		.msg = "SAS_HGC_ABT fetch LM list",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF),
		.msg = "read dqe poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF),
		.msg = "read iost poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF),
		.msg = "read itct poison",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF),
		.msg = "read itct ncq poison",
	},

};

/*
 * Fatal AXI/list-structure error interrupt handler.  Decodes ENT_INT_SRC3,
 * logs each fatal source and queues a controller reset (rst_work) for it,
 * then also polls the ECC status via fatal_ecc_int_v3_hw().
 */
static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
{
	u32 irq_value, irq_msk;
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	/* Mask the fatal sources (0x1df00) while servicing them. */
	irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);

	/* Only consider sources that were not already masked. */
	irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	irq_value &= ~irq_msk;

	for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
		const struct hisi_sas_hw_error *error = &fatal_axi_error[i];

		if (!(irq_value & error->irq_msk))
			continue;

		if (error->sub) {
			/* Decode per-bit detail from the associated register. */
			const struct hisi_sas_hw_error *sub = error->sub;
			u32 err_value = hisi_sas_read32(hisi_hba, error->reg);

			for (; sub->msk || sub->msg; sub++) {
				if (!(err_value & sub->msk))
					continue;

				dev_err(dev, "%s error (0x%x) found!\n",
					sub->msg, irq_value);
				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
			}
		} else {
			dev_err(dev, "%s error (0x%x) found!\n",
				error->msg, irq_value);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}

		/*
		 * On pre-0x21 hardware revisions, request AXI master
		 * shutdown to stop further bus traffic after a fatal error.
		 */
		if (pdev->revision < 0x21) {
			u32 reg_val;

			reg_val = hisi_sas_read32(hisi_hba,
						  AXI_MASTER_CFG_BASE +
						  AM_CTRL_GLOBAL);
			reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
			hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
					 AM_CTRL_GLOBAL, reg_val);
		}
	}

	fatal_ecc_int_v3_hw(hisi_hba);

	if (irq_value &
BIT(ENT_INT_SRC3_ITC_INT_OFF)) {	/* completes the condition begun above */
		/*
		 * ITCT clear done: read back the device id, clear the
		 * request and wake the waiter blocked on sas_dev->completion.
		 */
		u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
		u32 dev_id = reg_val & ITCT_DEV_MSK;
		struct hisi_sas_device *sas_dev =
			&hisi_hba->devices[dev_id];

		hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
		dev_dbg(dev, "clear ITCT ok\n");
		complete(sas_dev->completion);
	}

	/* Ack the handled fatal sources and restore the interrupt mask. */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);

	return IRQ_HANDLED;
}

/*
 * Translate the hardware error record of a failed command into libsas
 * task status (ts->stat / ts->residual), per transport protocol.  Sets
 * slot->abort when the IO reached the target and needs aborting.
 */
static void
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
	       struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct hisi_sas_complete_v3_hdr *complete_queue =
		hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
		&complete_queue[slot->cmplt_queue_slot];
	struct hisi_sas_err_record_v3 *record =
		hisi_sas_status_buf_addr_mem(slot);
	u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
	u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
	u32 dw3 = le32_to_cpu(complete_hdr->dw3);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			/* Short read: report underrun with the residual. */
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			/* IO reached the target: must be aborted. */
			ts->stat = SAS_QUEUE_FULL;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_PHY_DOWN;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		/* Always finalise the SATA FIS state for these protocols. */
		hisi_sas_sata_done(task, slot);
		break;
	case SAS_PROTOCOL_SMP:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	default:
		break;
	}
}

/*
 * Process one completion-queue entry: decode the completion header,
 * translate errors, copy response data and complete the sas_task.
 * Returns the resulting status (or -EINVAL for a stale slot).
 */
static int
slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	struct task_status_struct *ts;
	struct domain_device *device;
	struct sas_ha_struct *ha;
	enum exec_status sts;
	struct hisi_sas_complete_v3_hdr *complete_queue =
		hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
		&complete_queue[slot->cmplt_queue_slot];
	unsigned long flags;
	bool is_internal = slot->is_internal;
	u32 dw0, dw1, dw3;

	/* Stale or torn-down slot: nothing to complete. */
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -EINVAL;

	ts = &task->task_status;
	device = task->dev;
	ha = device->port->ha;
	sas_dev = device->lldd_dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	memset(ts, 0, sizeof(*ts));
	ts->resp = SAS_TASK_COMPLETE;

	if (unlikely(!sas_dev)) {
		dev_dbg(dev, "slot complete: port has not device\n");
		ts->stat = SAS_PHY_DOWN;
		goto out;
	}

	dw0 = le32_to_cpu(complete_hdr->dw0);
	dw1 = le32_to_cpu(complete_hdr->dw1);
	dw3 = le32_to_cpu(complete_hdr->dw3);

	/*
	 * Use SAS+TMF status codes
	 */
	switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) {
	case STAT_IO_ABORTED:
		/* this IO has been aborted by abort command */
		ts->stat = SAS_ABORTED_TASK;
		goto out;
	case STAT_IO_COMPLETE:
		/* internal abort command complete */
		ts->stat = TMF_RESP_FUNC_SUCC;
		goto out;
	case STAT_IO_NO_DEVICE:
		ts->stat = TMF_RESP_FUNC_COMPLETE;
		goto out;
	case STAT_IO_NOT_VALID:
		/*
		 * abort single IO, the controller can't find the IO
		 */
		ts->stat = TMF_RESP_FUNC_FAILED;
		goto out;
	default:
		break;
	}

	/* check for erroneous completion */
	if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
		u32 *error_info = hisi_sas_status_buf_addr_mem(slot);

		slot_err_v3_hw(hisi_hba, task, slot);
		if (ts->stat != SAS_DATA_UNDERRUN)
			dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
				 slot->idx, task, sas_dev->device_id,
				 dw0, dw1, complete_hdr->act, dw3,
				 error_info[0], error_info[1],
				 error_info[2], error_info[3]);
		/* slot->abort set: leave the slot for the abort path. */
		if (unlikely(slot->abort))
			return ts->stat;
		goto out;
	}

	/* Successful completion: build the per-protocol response. */
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP: {
		/* The SSP response IU follows the error record in memory. */
		struct ssp_response_iu *iu =
			hisi_sas_status_buf_addr_mem(slot) +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to;

		ts->stat = SAM_STAT_GOOD;
		to = kmap_atomic(sg_page(sg_resp));

		/* Unmap both directions before copying the SMP response out. */
		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		memcpy(to + sg_resp->offset,
		       hisi_sas_status_buf_addr_mem(slot) +
		       sizeof(struct hisi_sas_err_record),
		       sg_dma_len(sg_resp));
		kunmap_atomic(to);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		ts->stat = SAM_STAT_GOOD;
		hisi_sas_sata_done(task, slot);
		break;
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_warn(dev, "slot complete: port %d has removed\n",
			 slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	sts = ts->stat;
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		dev_info(dev, "slot complete: task(%p) aborted\n", task);
		return SAS_ABORTED_TASK;
	}
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot);

	if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
		/* While the HA is frozen, completions are not delivered. */
		spin_lock_irqsave(&device->done_lock, flags);
		if (test_bit(SAS_HA_FROZEN, &ha->state)) {
			spin_unlock_irqrestore(&device->done_lock, flags);
			dev_info(dev, "slot complete: task(%p) ignored\n ",
				 task);
			return sts;
		}
		spin_unlock_irqrestore(&device->done_lock, flags);
	}

	if (task->task_done)
		task->task_done(task);

	return sts;
}

/*
 * Completion-queue tasklet: drain entries between the cached read pointer
 * and the hardware write pointer, completing each referenced slot, then
 * publish the new read pointer back to the hardware.
 */
static void cq_tasklet_v3_hw(unsigned long val)
{
	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_complete_v3_hdr *complete_queue;
	u32 rd_point = cq->rd_point, wr_point;
	int queue = cq->id;

	complete_queue = hisi_hba->complete_hdr[queue];

	/* Per-queue register stride is 0x14 bytes. */
	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v3_hdr *complete_hdr;
		struct device *dev = hisi_hba->dev;
		u32 dw1;
		int iptt;

		complete_hdr = &complete_queue[rd_point];
		dw1 = le32_to_cpu(complete_hdr->dw1);

		iptt = dw1 & CMPLT_HDR_IPTT_MSK;
		if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v3_hw(hisi_hba, slot);
		} else
			dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);

		/* Ring buffer wrap-around. */
		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
}

/*
 * Completion-queue hard IRQ: ack the per-queue OQ interrupt source and
 * defer the drain work to the CQ tasklet.
 */
static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	int queue = cq->id;

	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);

	tasklet_schedule(&cq->tasklet);

	return IRQ_HANDLED;
}

/*
 * Build the CPU -> completion-queue reply map from the MSI affinity
 * masks of the CQ vectors.  Falls back to a modulo spread over all
 * queues if any vector has no affinity mask.
 */
static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
{
	const struct cpumask *mask;
	int queue, cpu;

	for (queue = 0; queue < nvecs; queue++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[queue];

		mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue +
					    BASE_VECTORS_V3_HW);
		if (!mask)
			goto fallback;
		cq->pci_irq_mask = mask;
		for_each_cpu(cpu, mask)
			hisi_hba->reply_map[cpu] = queue;
	}
	return;

fallback:
	for_each_possible_cpu(cpu)
		hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count;
	/* Don't clean all CQ masks */
}

/*
 * Allocate MSI vectors and request all interrupt handlers: phy events
 * (vector 1), channel interrupts (vector 2), fatal errors (vector 11)
 * and one CQ interrupt per completion queue (vector 16 upwards, or a
 * single shared vector 16 in interrupt-converge mode).
 */
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int vectors, rc;
	int i, k;
	int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;

	if (auto_affine_msi_experimental) {
		/* Keep the non-CQ vectors out of affinity spreading. */
		struct irq_affinity desc = {
			.pre_vectors = BASE_VECTORS_V3_HW,
		};

		min_msi = MIN_AFFINE_VECTORS_V3_HW;

		hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
						   sizeof(unsigned int),
						   GFP_KERNEL);
		if (!hisi_hba->reply_map)
			return -ENOMEM;
		vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
							 min_msi, max_msi,
							 PCI_IRQ_MSI |
PCI_IRQ_AFFINITY,	/* completes the flags argument begun above */
							 &desc);
		if (vectors < 0)
			return -ENOENT;
		setup_reply_map_v3_hw(hisi_hba, vectors - BASE_VECTORS_V3_HW);
	} else {
		min_msi = max_msi;
		vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, min_msi,
						max_msi, PCI_IRQ_MSI);
		if (vectors < 0)
			return vectors;
	}

	/* Vectors beyond the base ones are completion-queue vectors. */
	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
			      int_phy_up_down_bcast_v3_hw, 0,
			      DRV_NAME " phy", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_irq_vectors;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
			      int_chnl_int_v3_hw, 0,
			      DRV_NAME " channel", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_phy_irq;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
			      fatal_axi_int_v3_hw, 0,
			      DRV_NAME " fatal", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_chnl_interrupt;
	}

	/* Init tasklets for cq only */
	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct tasklet_struct *t = &cq->tasklet;
		/* Converge mode: all CQs share vector 16 (IRQF_SHARED). */
		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
		unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;

		rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
				      cq_interrupt_v3_hw, irqflags,
				      DRV_NAME " cq", cq);
		if (rc) {
			dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
				i, rc);
			rc = -ENOENT;
			goto free_cq_irqs;
		}

		tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
	}

	return 0;

	/* Error unwind: release everything requested so far, in reverse. */
free_cq_irqs:
	for (k = 0; k < i; k++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[k];
		int nr = hisi_sas_intr_conv ? 16 : 16 + k;

		free_irq(pci_irq_vector(pdev, nr), cq);
	}
	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
free_chnl_interrupt:
	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
free_phy_irq:
	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}

/* Full controller bring-up: hardware init followed by interrupt setup. */
static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	rc = interrupt_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	return 0;
}

/*
 * Program the per-phy maximum programmed link rate.  0x800 is the base
 * value the rate mask is OR'ed into -- NOTE(review): meaning of the
 * constant not visible here; confirm against the register description.
 */
static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
				   struct sas_phy_linkrates *r)
{
	enum sas_linkrate max = r->maximum_linkrate;
	u32 prog_phy_link_rate = 0x800;

	prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
			     prog_phy_link_rate);
}

/*
 * Mask every interrupt source (global, ECC, per-queue OQ and per-phy
 * channel interrupts) and wait for in-flight handlers to finish.
 */
static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
{
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	synchronize_irq(pci_irq_vector(pdev, 1));
	synchronize_irq(pci_irq_vector(pdev, 2));
	synchronize_irq(pci_irq_vector(pdev, 11));
	for (i = 0; i < hisi_hba->queue_count; i++) {
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
		synchronize_irq(pci_irq_vector(pdev, i + 16));
	}

	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i,
PHYCTRL_PHY_ENA_MSK, 0x1);	/* completes the call begun above */
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
	}
}

/* Read the raw PHY_STATE register (per-phy link state bitmap). */
static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
{
	return hisi_sas_read32(hisi_hba, PHY_STATE);
}

/*
 * Quiesce the controller: disable interrupts and delivery queues, stop
 * the phys, then request AXI master shutdown and poll until the bus is
 * idle.  Returns 0 on success or the poll-timeout error.
 */
static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	u32 status, reg_val;
	int rc;

	interrupt_disable_v3_hw(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
	hisi_sas_kill_tasklets(hisi_hba);

	hisi_sas_stop_phys(hisi_hba);

	mdelay(10);

	reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
				  AM_CTRL_GLOBAL);
	reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
			 AM_CTRL_GLOBAL, reg_val);

	/* wait until bus idle */
	rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
					  AM_CURR_TRANS_RETURN, status,
					  status == 0x3, 10, 100);
	if (rc) {
		dev_err(dev, "axi bus is not idle, rc=%d\n", rc);
		return rc;
	}

	return 0;
}

/* Soft reset: quiesce the host, reinitialise memory, re-run HW init. */
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = disable_host_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	return hw_init_v3_hw(hisi_hba);
}

/*
 * SGPIO write-out: program the SAS_GPIO_TX registers from write_data.
 * Only SAS_GPIO_REG_TX is supported; each 32-bit register covers up to
 * four phys (hence the (n_phy + 3) / 4 range check).
 */
static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
			    u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct device *dev = hisi_hba->dev;
	u32 *data = (u32 *)write_data;
	int i;

	switch (reg_type) {
	case SAS_GPIO_REG_TX:
		if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) {
			dev_err(dev, "write gpio: invalid reg range[%d, %d]\n",
				reg_index, reg_index + reg_count - 1);
			return -EINVAL;
		}

		for (i = 0; i < reg_count; i++)
			hisi_sas_write32(hisi_hba,
					 SAS_GPIO_TX_0_1 + (reg_index + i) * 4,
					 data[i]);
		break;
	default:
		dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
			reg_type);
		return -EINVAL;
	}

	return 0;
}

/*
 * Poll CQE_SEND_CNT until it stops changing (commands drained) or
 * timeout_ms elapses.  Returns 0 once the count is stable, -ETIMEDOUT
 * otherwise.
 */
static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
					    int delay_ms, int timeout_ms)
{
	struct device *dev = hisi_hba->dev;
	int entries, entries_old = 0, time;

	for (time = 0; time < timeout_ms; time += delay_ms) {
		entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
		if (entries == entries_old)
			break;

		entries_old = entries;
		msleep(delay_ms);
	}

	if (time >= timeout_ms)
		return -ETIMEDOUT;

	dev_dbg(dev, "wait commands complete %dms\n", time);

	return 0;
}

/* sysfs: expose the (module-wide) interrupt-converge setting, read-only. */
static ssize_t intr_conv_v3_hw_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
}
static DEVICE_ATTR_RO(intr_conv_v3_hw);

/*
 * Apply the current interrupt-coalescing settings.  A zero tick or
 * count disables coalescing (EN=0x1); otherwise both time and count
 * thresholds are programmed with EN=0x3.
 */
static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba)
{
	/* config those registers between enable and disable PHYs */
	hisi_sas_stop_phys(hisi_hba);

	if (hisi_hba->intr_coal_ticks == 0 ||
	    hisi_hba->intr_coal_count == 0) {
		hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	} else {
		hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME,
				 hisi_hba->intr_coal_ticks);
		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT,
				 hisi_hba->intr_coal_count);
	}
	phys_init_v3_hw(hisi_hba);
}

/* sysfs: show the coalescing tick threshold. */
static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 hisi_hba->intr_coal_ticks);
}

/* sysfs: set the coalescing tick threshold (24-bit field) and re-apply. */
static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);
	u32 intr_coal_ticks;
	int ret;

	ret = kstrtou32(buf, 10, &intr_coal_ticks);
	if (ret) {
		dev_err(dev, "Input data of interrupt coalesce unmatch\n");
		return -EINVAL;
	}

	if (intr_coal_ticks >= BIT(24)) {
		dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
		return -EINVAL;
	}

	hisi_hba->intr_coal_ticks = intr_coal_ticks;

	config_intr_coal_v3_hw(hisi_hba);

	return count;
}
static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw);

/* sysfs: show the coalescing completion-count threshold. */
static ssize_t intr_coal_count_v3_hw_show(struct device *dev,
					  struct device_attribute
					  *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 hisi_hba->intr_coal_count);
}

/* sysfs: set the coalescing count threshold (8-bit field) and re-apply. */
static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
					   struct device_attribute
					   *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct hisi_hba *hisi_hba = shost_priv(shost);
	u32 intr_coal_count;
	int ret;

	ret = kstrtou32(buf, 10, &intr_coal_count);
	if (ret) {
		dev_err(dev, "Input data of interrupt coalesce unmatch\n");
		return -EINVAL;
	}

	if (intr_coal_count >= BIT(8)) {
		dev_err(dev, "intr_coal_count must be less than 2^8!\n");
		return -EINVAL;
	}

	hisi_hba->intr_coal_count = intr_coal_count;

	config_intr_coal_v3_hw(hisi_hba);

	return count;
}
static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
/* Host sysfs attributes registered through the scsi_host_template. */
static struct device_attribute *host_attrs_v3_hw[] = {
	&dev_attr_phy_event_threshold,
	&dev_attr_intr_conv_v3_hw,
	&dev_attr_intr_coal_ticks_v3_hw,
	&dev_attr_intr_coal_count_v3_hw,
	NULL
};

/* Per-port (per-phy) registers dumped in a debugfs snapshot. */
static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(PHY_CFG),
	HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE),
	HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE),
	HISI_SAS_DEBUGFS_REG(PHY_CTRL),
	HISI_SAS_DEBUGFS_REG(SL_CFG),
	HISI_SAS_DEBUGFS_REG(AIP_LIMIT),
	HISI_SAS_DEBUGFS_REG(SL_CONTROL),
	HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5),
	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6),
	HISI_SAS_DEBUGFS_REG(TXID_AUTO),
	HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0),
	HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H),
	HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER),
	HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE),
	HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER),
	HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG),
	HISI_SAS_DEBUGFS_REG(CHL_INT0),
	HISI_SAS_DEBUGFS_REG(CHL_INT1),
	HISI_SAS_DEBUGFS_REG(CHL_INT2),
	HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK),
	HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK),
	HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK),
	HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN),
	HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER),
	HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK),
	HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK),
	HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK),
	HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS),
	HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS),
	HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR),
	HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR),
	{}	/* sentinel */
};

/* debugfs descriptor for the port register space (stride PORT_BASE). */
static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
	.lu = debugfs_port_reg_lu,
	.count = 0x100,
	.base_off = PORT_BASE,
	.read_port_reg = hisi_sas_phy_read32,
};

/* Global controller registers dumped in a debugfs snapshot. */
static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
	HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE),
	HISI_SAS_DEBUGFS_REG(PHY_CONTEXT),
	HISI_SAS_DEBUGFS_REG(PHY_STATE),
	HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA),
	HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE),
	HISI_SAS_DEBUGFS_REG(ITCT_CLR),
	HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO),
	HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI),
	HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO),
	HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI),
	HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG),
	HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL),
	HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL),
	HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME),
	HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE),
	HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME),
	HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME),
	HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME),
	HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN),
	HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME),
	HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2),
	HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT),
	HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE),
	HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS),
	HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS),
	HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO),
	HISI_SAS_DEBUGFS_REG(INT_COAL_EN),
	HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT),
	HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME),
	HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT),
	HISI_SAS_DEBUGFS_REG(OQ_INT_SRC),
	HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2),
	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3),
	HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK),
	HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK),
	HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK),
	HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR),
	HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK),
	HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN),
	HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR),
	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR),
	HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG),
	HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR),
	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR),
	HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG),
	HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG),
	HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1),
	HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1),
	HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD),
	{}	/* sentinel */
};

/* debugfs descriptor for the global register space. */
static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
	.lu = debugfs_global_reg_lu,
	.count = 0x800,
	.read_global_reg = hisi_sas_read32,
};

/*
 * Quiesce IO before a debugfs snapshot: reject new commands, stop
 * delivery queues and wait for outstanding commands to drain.
 */
static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); 2865 2866 if (wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000) == -ETIMEDOUT) 2867 dev_dbg(dev, "Wait commands complete timeout!\n"); 2868 2869 hisi_sas_kill_tasklets(hisi_hba); 2870 } 2871 2872 static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) 2873 { 2874 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 2875 (u32)((1ULL << hisi_hba->queue_count) - 1)); 2876 2877 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 2878 } 2879 2880 static struct scsi_host_template sht_v3_hw = { 2881 .name = DRV_NAME, 2882 .module = THIS_MODULE, 2883 .queuecommand = sas_queuecommand, 2884 .target_alloc = sas_target_alloc, 2885 .slave_configure = hisi_sas_slave_configure, 2886 .scan_finished = hisi_sas_scan_finished, 2887 .scan_start = hisi_sas_scan_start, 2888 .change_queue_depth = sas_change_queue_depth, 2889 .bios_param = sas_bios_param, 2890 .this_id = -1, 2891 .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, 2892 .sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT, 2893 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 2894 .eh_device_reset_handler = sas_eh_device_reset_handler, 2895 .eh_target_reset_handler = sas_eh_target_reset_handler, 2896 .target_destroy = sas_target_destroy, 2897 .ioctl = sas_ioctl, 2898 .shost_attrs = host_attrs_v3_hw, 2899 .tag_alloc_policy = BLK_TAG_ALLOC_RR, 2900 .host_reset = hisi_sas_host_reset, 2901 }; 2902 2903 static const struct hisi_sas_hw hisi_sas_v3_hw = { 2904 .hw_init = hisi_sas_v3_init, 2905 .setup_itct = setup_itct_v3_hw, 2906 .get_wideport_bitmap = get_wideport_bitmap_v3_hw, 2907 .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), 2908 .clear_itct = clear_itct_v3_hw, 2909 .sl_notify_ssp = sl_notify_ssp_v3_hw, 2910 .prep_ssp = prep_ssp_v3_hw, 2911 .prep_smp = prep_smp_v3_hw, 2912 .prep_stp = prep_ata_v3_hw, 2913 .prep_abort = prep_abort_v3_hw, 2914 .start_delivery = start_delivery_v3_hw, 2915 .slot_complete = slot_complete_v3_hw, 2916 .phys_init = phys_init_v3_hw, 2917 .phy_start = 
start_phy_v3_hw, 2918 .phy_disable = disable_phy_v3_hw, 2919 .phy_hard_reset = phy_hard_reset_v3_hw, 2920 .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw, 2921 .phy_set_linkrate = phy_set_linkrate_v3_hw, 2922 .dereg_device = dereg_device_v3_hw, 2923 .soft_reset = soft_reset_v3_hw, 2924 .get_phys_state = get_phys_state_v3_hw, 2925 .get_events = phy_get_events_v3_hw, 2926 .write_gpio = write_gpio_v3_hw, 2927 .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw, 2928 .debugfs_reg_global = &debugfs_global_reg, 2929 .debugfs_reg_port = &debugfs_port_reg, 2930 .snapshot_prepare = debugfs_snapshot_prepare_v3_hw, 2931 .snapshot_restore = debugfs_snapshot_restore_v3_hw, 2932 }; 2933 2934 static struct Scsi_Host * 2935 hisi_sas_shost_alloc_pci(struct pci_dev *pdev) 2936 { 2937 struct Scsi_Host *shost; 2938 struct hisi_hba *hisi_hba; 2939 struct device *dev = &pdev->dev; 2940 2941 shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba)); 2942 if (!shost) { 2943 dev_err(dev, "shost alloc failed\n"); 2944 return NULL; 2945 } 2946 hisi_hba = shost_priv(shost); 2947 2948 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2949 INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler); 2950 hisi_hba->hw = &hisi_sas_v3_hw; 2951 hisi_hba->pci_dev = pdev; 2952 hisi_hba->dev = dev; 2953 hisi_hba->shost = shost; 2954 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2955 2956 if (prot_mask & ~HISI_SAS_PROT_MASK) 2957 dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n", 2958 prot_mask); 2959 else 2960 hisi_hba->prot_mask = prot_mask; 2961 2962 timer_setup(&hisi_hba->timer, NULL, 0); 2963 2964 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2965 goto err_out; 2966 2967 if (hisi_sas_alloc(hisi_hba)) { 2968 hisi_sas_free(hisi_hba); 2969 goto err_out; 2970 } 2971 2972 return shost; 2973 err_out: 2974 scsi_host_put(shost); 2975 dev_err(dev, "shost alloc failed\n"); 2976 return NULL; 2977 } 2978 2979 static int 2980 hisi_sas_v3_probe(struct pci_dev *pdev, const 
struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable_device;

	/* Prefer 64-bit DMA, fall back to 32-bit before giving up. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "No usable DMA addressing method\n");
		rc = -ENODEV;
		goto err_out_regions;
	}

	shost = hisi_sas_shost_alloc_pci(pdev);
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	dev_set_drvdata(dev, sha);

	/* BAR 5 holds the register space (managed mapping). */
	hisi_hba->regs = pcim_iomap(pdev, 5, 0);
	if (!hisi_hba->regs) {
		dev_err(dev, "cannot map register\n");
		rc = -ENOMEM;
		goto err_out_ha;
	}

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
	shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys =
hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	/* Enable DIF/DIX only if requested via the prot_mask module param. */
	if (hisi_hba->prot_mask) {
		dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
			 prot_mask);
		scsi_host_set_prot(hisi_hba->shost, prot_mask);
		if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
			scsi_host_set_guard(hisi_hba->shost,
					    SHOST_DIX_GUARD_CRC);
	}

	if (hisi_sas_debugfs_enable)
		hisi_sas_debugfs_init(hisi_hba);

	rc = scsi_add_host(shost, dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

	/* Unwind in reverse order of acquisition. */
err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	scsi_host_put(shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out:
	return rc;
}

/*
 * Free every IRQ this driver requested: vectors 1, 2 and 11 carry the
 * non-CQ interrupts; each completion queue uses vector 16 (converged
 * mode shares one vector) or 16 + i otherwise.
 */
static void
hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
	int i;

	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		int nr = hisi_sas_intr_conv ?
16 : 16 + i;

		free_irq(pci_irq_vector(pdev, nr), cq);
	}
	pci_free_irq_vectors(pdev);
}

/* PCI .remove: tear down in reverse of probe order. */
static void hisi_sas_v3_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sas_ha_struct *sha = dev_get_drvdata(dev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	hisi_sas_debugfs_exit(hisi_hba);

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
	hisi_sas_kill_tasklets(hisi_hba);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
}

/* PCI error-handler hook: quiesce the controller ahead of an FLR. */
static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	int rc;

	dev_info(dev, "FLR prepare\n");
	set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = disable_host_v3_hw(hisi_hba);
	if (rc)
		dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
}

/* PCI error-handler hook: re-init hw and resume traffic after the FLR. */
static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	int rc;

	hisi_sas_init_mem(hisi_hba);

	rc = hw_init_v3_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
		return;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "FLR done\n");
}

enum {
	/* instances of the controller */
	hip08,
};

static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct sas_ha_struct *sha =
pci_get_drvdata(pdev); 3183 struct hisi_hba *hisi_hba = sha->lldd_ha; 3184 struct device *dev = hisi_hba->dev; 3185 struct Scsi_Host *shost = hisi_hba->shost; 3186 pci_power_t device_state; 3187 int rc; 3188 3189 if (!pdev->pm_cap) { 3190 dev_err(dev, "PCI PM not supported\n"); 3191 return -ENODEV; 3192 } 3193 3194 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 3195 return -1; 3196 3197 scsi_block_requests(shost); 3198 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 3199 flush_workqueue(hisi_hba->wq); 3200 3201 rc = disable_host_v3_hw(hisi_hba); 3202 if (rc) { 3203 dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc); 3204 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 3205 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 3206 scsi_unblock_requests(shost); 3207 return rc; 3208 } 3209 3210 hisi_sas_init_mem(hisi_hba); 3211 3212 device_state = pci_choose_state(pdev, state); 3213 dev_warn(dev, "entering operating state [D%d]\n", 3214 device_state); 3215 pci_save_state(pdev); 3216 pci_disable_device(pdev); 3217 pci_set_power_state(pdev, device_state); 3218 3219 hisi_sas_release_tasks(hisi_hba); 3220 3221 sas_suspend_ha(sha); 3222 return 0; 3223 } 3224 3225 static int hisi_sas_v3_resume(struct pci_dev *pdev) 3226 { 3227 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 3228 struct hisi_hba *hisi_hba = sha->lldd_ha; 3229 struct Scsi_Host *shost = hisi_hba->shost; 3230 struct device *dev = hisi_hba->dev; 3231 unsigned int rc; 3232 pci_power_t device_state = pdev->current_state; 3233 3234 dev_warn(dev, "resuming from operating state [D%d]\n", 3235 device_state); 3236 pci_set_power_state(pdev, PCI_D0); 3237 pci_enable_wake(pdev, PCI_D0, 0); 3238 pci_restore_state(pdev); 3239 rc = pci_enable_device(pdev); 3240 if (rc) 3241 dev_err(dev, "enable device failed during resume (%d)\n", rc); 3242 3243 pci_set_master(pdev); 3244 scsi_unblock_requests(shost); 3245 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 3246 3247 sas_prep_resume_ha(sha); 
init_reg_v3_hw(hisi_hba);
	hisi_hba->hw->phys_init(hisi_hba);
	sas_resume_ha(sha);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return 0;
}

/* Devices handled by this driver; 0xa230 is the hip08 SAS controller. */
static const struct pci_device_id sas_v3_pci_table[] = {
	{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
	{}
};
MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);

static const struct pci_error_handlers hisi_sas_err_handler = {
	.reset_prepare	= hisi_sas_reset_prepare_v3_hw,
	.reset_done	= hisi_sas_reset_done_v3_hw,
};

static struct pci_driver sas_v3_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sas_v3_pci_table,
	.probe		= hisi_sas_v3_probe,
	.remove		= hisi_sas_v3_remove,
	.suspend	= hisi_sas_v3_suspend,
	.resume		= hisi_sas_v3_resume,
	.err_handler	= &hisi_sas_err_handler,
};

module_pci_driver(sas_v3_pci_driver);
/* Read-only module param: converge all CQ interrupts onto one vector. */
module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
MODULE_ALIAS("pci:" DRV_NAME);