/*
 * Copyright (c) 2016 Linaro Ltd.
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas_v2_hw"

/*
 * Global (controller-wide) registers that need init.
 * Values are byte offsets from the controller register base.
 */
#define DLVRY_QUEUE_ENABLE		0x0
#define IOST_BASE_ADDR_LO		0x8
#define IOST_BASE_ADDR_HI		0xc
#define ITCT_BASE_ADDR_LO		0x10
#define ITCT_BASE_ADDR_HI		0x14
#define IO_BROKEN_MSG_ADDR_LO		0x18
#define IO_BROKEN_MSG_ADDR_HI		0x1c
#define PHY_CONTEXT			0x20
#define PHY_STATE			0x24
#define PHY_PORT_NUM_MA			0x28
#define PORT_STATE			0x2c
#define PORT_STATE_PHY8_PORT_NUM_OFF	16
#define PORT_STATE_PHY8_PORT_NUM_MSK	(0xf << PORT_STATE_PHY8_PORT_NUM_OFF)
#define PORT_STATE_PHY8_CONN_RATE_OFF	20
#define PORT_STATE_PHY8_CONN_RATE_MSK	(0xf << PORT_STATE_PHY8_CONN_RATE_OFF)
#define PHY_CONN_RATE			0x30
#define HGC_TRANS_TASK_CNT_LIMIT	0x38
#define AXI_AHB_CLK_CFG			0x3c
#define ITCT_CLR			0x44
#define ITCT_CLR_EN_OFF			16
#define ITCT_CLR_EN_MSK			(0x1 << ITCT_CLR_EN_OFF)
#define ITCT_DEV_OFF			0
#define ITCT_DEV_MSK			(0x7ff << ITCT_DEV_OFF)
#define AXI_USER1			0x48
#define AXI_USER2			0x4c
#define IO_SATA_BROKEN_MSG_ADDR_LO	0x58
#define IO_SATA_BROKEN_MSG_ADDR_HI	0x5c
#define SATA_INITI_D2H_STORE_ADDR_LO	0x60
#define SATA_INITI_D2H_STORE_ADDR_HI	0x64
#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL	0x84
#define HGC_SAS_TXFAIL_RETRY_CTRL	0x88
#define HGC_GET_ITV_TIME		0x90
#define DEVICE_MSG_WORK_MODE		0x94
#define OPENA_WT_CONTI_TIME		0x9c
#define I_T_NEXUS_LOSS_TIME		0xa0
#define MAX_CON_TIME_LIMIT_TIME		0xa4
#define BUS_INACTIVE_LIMIT_TIME		0xa8
#define REJECT_TO_OPEN_LIMIT_TIME	0xac
#define CFG_AGING_TIME			0xbc
#define HGC_DFX_CFG2			0xc0
#define HGC_IOMB_PROC1_STATUS		0x104
#define CFG_1US_TIMER_TRSH		0xcc
#define HGC_LM_DFX_STATUS2		0x128
#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF	0
#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK	(0xfff << HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF	12
#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK	(0x7ff << HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
#define HGC_CQE_ECC_ADDR		0x13c
#define HGC_CQE_ECC_1B_ADDR_OFF		0
#define HGC_CQE_ECC_1B_ADDR_MSK		(0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
#define HGC_CQE_ECC_MB_ADDR_OFF		8
#define HGC_CQE_ECC_MB_ADDR_MSK		(0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
#define HGC_IOST_ECC_ADDR		0x140
#define HGC_IOST_ECC_1B_ADDR_OFF	0
#define HGC_IOST_ECC_1B_ADDR_MSK	(0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
#define HGC_IOST_ECC_MB_ADDR_OFF	16
#define HGC_IOST_ECC_MB_ADDR_MSK	(0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
#define HGC_DQE_ECC_ADDR		0x144
#define HGC_DQE_ECC_1B_ADDR_OFF		0
#define HGC_DQE_ECC_1B_ADDR_MSK		(0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
#define HGC_DQE_ECC_MB_ADDR_OFF		16
#define HGC_DQE_ECC_MB_ADDR_MSK		(0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define HGC_INVLD_DQE_INFO		0x148
#define HGC_INVLD_DQE_INFO_FB_CH0_OFF	9
#define HGC_INVLD_DQE_INFO_FB_CH0_MSK	(0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
#define HGC_INVLD_DQE_INFO_FB_CH3_OFF	18
#define HGC_ITCT_ECC_ADDR		0x150
#define HGC_ITCT_ECC_1B_ADDR_OFF	0
#define HGC_ITCT_ECC_1B_ADDR_MSK	(0x3ff << HGC_ITCT_ECC_1B_ADDR_OFF)
#define HGC_ITCT_ECC_MB_ADDR_OFF	16
#define HGC_ITCT_ECC_MB_ADDR_MSK	(0x3ff << HGC_ITCT_ECC_MB_ADDR_OFF)
#define HGC_AXI_FIFO_ERR_INFO		0x154
#define AXI_ERR_INFO_OFF		0
#define AXI_ERR_INFO_MSK		(0xff << AXI_ERR_INFO_OFF)
#define FIFO_ERR_INFO_OFF		8
#define FIFO_ERR_INFO_MSK		(0xff << FIFO_ERR_INFO_OFF)
#define INT_COAL_EN			0x19c
#define OQ_INT_COAL_TIME		0x1a0
#define OQ_INT_COAL_CNT			0x1a4
#define ENT_INT_COAL_TIME		0x1a8
#define ENT_INT_COAL_CNT		0x1ac
#define OQ_INT_SRC			0x1b0
#define OQ_INT_SRC_MSK			0x1b4
#define ENT_INT_SRC1			0x1b8
#define ENT_INT_SRC1_D2H_FIS_CH0_OFF	0
#define ENT_INT_SRC1_D2H_FIS_CH0_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
#define ENT_INT_SRC1_D2H_FIS_CH1_OFF	8
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2			0x1bc
#define ENT_INT_SRC3			0x1c0
#define ENT_INT_SRC3_WP_DEPTH_OFF		8
#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF	9
#define ENT_INT_SRC3_RP_DEPTH_OFF		10
#define ENT_INT_SRC3_AXI_OFF			11
#define ENT_INT_SRC3_FIFO_OFF			12
#define ENT_INT_SRC3_LM_OFF			14
#define ENT_INT_SRC3_ITC_INT_OFF		15
#define ENT_INT_SRC3_ITC_INT_MSK	(0x1 << ENT_INT_SRC3_ITC_INT_OFF)
#define ENT_INT_SRC3_ABT_OFF		16
#define ENT_INT_SRC_MSK1		0x1c4
#define ENT_INT_SRC_MSK2		0x1c8
#define ENT_INT_SRC_MSK3		0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF	31
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK	(0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
#define SAS_ECC_INTR			0x1e8
#define SAS_ECC_INTR_DQE_ECC_1B_OFF		0
#define SAS_ECC_INTR_DQE_ECC_MB_OFF		1
#define SAS_ECC_INTR_IOST_ECC_1B_OFF		2
#define SAS_ECC_INTR_IOST_ECC_MB_OFF		3
#define SAS_ECC_INTR_ITCT_ECC_MB_OFF		4
#define SAS_ECC_INTR_ITCT_ECC_1B_OFF		5
#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF	6
#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF	7
#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF	8
#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF	9
#define SAS_ECC_INTR_CQE_ECC_1B_OFF		10
#define SAS_ECC_INTR_CQE_ECC_MB_OFF		11
#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF	12
#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF	13
#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF	14
#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF	15
#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF	16
#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF	17
#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF	18
#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF	19
#define SAS_ECC_INTR_MSK		0x1ec
#define HGC_ERR_STAT_EN			0x238
#define DLVRY_Q_0_BASE_ADDR_LO		0x260
#define DLVRY_Q_0_BASE_ADDR_HI		0x264
#define DLVRY_Q_0_DEPTH			0x268
#define DLVRY_Q_0_WR_PTR		0x26c
#define DLVRY_Q_0_RD_PTR		0x270
#define HYPER_STREAM_ID_EN_CFG		0xc80
#define OQ0_INT_SRC_MSK			0xc90
#define COMPL_Q_0_BASE_ADDR_LO		0x4e0
#define COMPL_Q_0_BASE_ADDR_HI		0x4e4
#define COMPL_Q_0_DEPTH			0x4e8
#define COMPL_Q_0_WR_PTR		0x4ec
#define COMPL_Q_0_RD_PTR		0x4f0
#define HGC_RXM_DFX_STATUS14		0xae8
#define HGC_RXM_DFX_STATUS14_MEM0_OFF	0
#define HGC_RXM_DFX_STATUS14_MEM0_MSK	(0x1ff << HGC_RXM_DFX_STATUS14_MEM0_OFF)
#define HGC_RXM_DFX_STATUS14_MEM1_OFF	9
#define HGC_RXM_DFX_STATUS14_MEM1_MSK	(0x1ff << HGC_RXM_DFX_STATUS14_MEM1_OFF)
#define HGC_RXM_DFX_STATUS14_MEM2_OFF	18
#define HGC_RXM_DFX_STATUS14_MEM2_MSK	(0x1ff << HGC_RXM_DFX_STATUS14_MEM2_OFF)
#define HGC_RXM_DFX_STATUS15		0xaec
#define HGC_RXM_DFX_STATUS15_MEM3_OFF	0
#define HGC_RXM_DFX_STATUS15_MEM3_MSK	(0x1ff << HGC_RXM_DFX_STATUS15_MEM3_OFF)

/*
 * Per-phy registers that need init.
 * Offsets are relative to PORT_BASE; each phy's register block is
 * 0x400 bytes wide (see hisi_sas_phy_read32/hisi_sas_phy_write32).
 */
#define PORT_BASE			(0x2000)

#define PHY_CFG				(PORT_BASE + 0x0)
#define HARD_PHY_LINKRATE		(PORT_BASE + 0x4)
#define PHY_CFG_ENA_OFF			0
#define PHY_CFG_ENA_MSK			(0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF		2
#define PHY_CFG_DC_OPT_MSK		(0x1 << PHY_CFG_DC_OPT_OFF)
#define PROG_PHY_LINK_RATE		(PORT_BASE + 0x8)
#define PROG_PHY_LINK_RATE_MAX_OFF	0
#define PROG_PHY_LINK_RATE_MAX_MSK	(0xff << PROG_PHY_LINK_RATE_MAX_OFF)
#define PHY_CTRL			(PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF		0
#define PHY_CTRL_RESET_MSK		(0x1 << PHY_CTRL_RESET_OFF)
#define SAS_PHY_CTRL			(PORT_BASE + 0x20)
#define SL_CFG				(PORT_BASE + 0x84)
#define PHY_PCN				(PORT_BASE + 0x44)
#define SL_TOUT_CFG			(PORT_BASE + 0x8c)
#define SL_CONTROL			(PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF	0
#define SL_CONTROL_NOTIFY_EN_MSK	(0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CONTROL_CTA_OFF		17
#define SL_CONTROL_CTA_MSK		(0x1 << SL_CONTROL_CTA_OFF)
#define RX_PRIMS_STATUS			(PORT_BASE + 0x98)
#define RX_BCAST_CHG_OFF		1
#define RX_BCAST_CHG_MSK		(0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0			(PORT_BASE + 0x9c)
#define TX_ID_DWORD1			(PORT_BASE + 0xa0)
#define TX_ID_DWORD2			(PORT_BASE + 0xa4)
#define TX_ID_DWORD3			(PORT_BASE + 0xa8)
#define TX_ID_DWORD4			(PORT_BASE + 0xaC)
#define TX_ID_DWORD5			(PORT_BASE + 0xb0)
#define TX_ID_DWORD6			(PORT_BASE + 0xb4)
#define TXID_AUTO			(PORT_BASE + 0xb8)
#define TXID_AUTO_CT3_OFF		1
#define TXID_AUTO_CT3_MSK		(0x1 << TXID_AUTO_CT3_OFF)
#define TXID_AUTO_CTB_OFF		11
#define TXID_AUTO_CTB_MSK		(0x1 << TXID_AUTO_CTB_OFF)
#define TX_HARDRST_OFF			2
#define TX_HARDRST_MSK			(0x1 << TX_HARDRST_OFF)
#define RX_IDAF_DWORD0			(PORT_BASE + 0xc4)
#define RX_IDAF_DWORD1			(PORT_BASE + 0xc8)
#define RX_IDAF_DWORD2			(PORT_BASE + 0xcc)
#define RX_IDAF_DWORD3			(PORT_BASE + 0xd0)
#define RX_IDAF_DWORD4			(PORT_BASE + 0xd4)
#define RX_IDAF_DWORD5			(PORT_BASE + 0xd8)
#define RX_IDAF_DWORD6			(PORT_BASE + 0xdc)
#define RXOP_CHECK_CFG_H		(PORT_BASE + 0xfc)
#define CON_CONTROL			(PORT_BASE + 0x118)
#define CON_CONTROL_CFG_OPEN_ACC_STP_OFF	0
#define CON_CONTROL_CFG_OPEN_ACC_STP_MSK	\
		(0x01 << CON_CONTROL_CFG_OPEN_ACC_STP_OFF)
#define DONE_RECEIVED_TIME		(PORT_BASE + 0x11c)
#define CHL_INT0			(PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF	0
#define CHL_INT0_HOTPLUG_TOUT_MSK	(0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
#define CHL_INT0_SL_RX_BCST_ACK_OFF	1
#define CHL_INT0_SL_RX_BCST_ACK_MSK	(0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
#define CHL_INT0_SL_PHY_ENABLE_OFF	2
#define CHL_INT0_SL_PHY_ENABLE_MSK	(0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
#define CHL_INT0_NOT_RDY_OFF		4
#define CHL_INT0_NOT_RDY_MSK		(0x1 << CHL_INT0_NOT_RDY_OFF)
#define CHL_INT0_PHY_RDY_OFF		5
#define CHL_INT0_PHY_RDY_MSK		(0x1 << CHL_INT0_PHY_RDY_OFF)
#define CHL_INT1			(PORT_BASE + 0x1b8)
#define CHL_INT1_DMAC_TX_ECC_ERR_OFF	15
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK	(0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF	17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK	(0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
#define CHL_INT2			(PORT_BASE + 0x1bc)
#define CHL_INT0_MSK			(PORT_BASE + 0x1c0)
#define CHL_INT1_MSK			(PORT_BASE + 0x1c4)
#define CHL_INT2_MSK			(PORT_BASE + 0x1c8)
#define CHL_INT_COAL_EN			(PORT_BASE + 0x1d0)
#define DMA_TX_DFX0			(PORT_BASE + 0x200)
#define DMA_TX_DFX1			(PORT_BASE + 0x204)
#define DMA_TX_DFX1_IPTT_OFF		0
#define DMA_TX_DFX1_IPTT_MSK		(0xffff << DMA_TX_DFX1_IPTT_OFF)
#define DMA_TX_FIFO_DFX0		(PORT_BASE + 0x240)
#define PORT_DFX0			(PORT_BASE + 0x258)
#define LINK_DFX2			(PORT_BASE + 0X264)
#define LINK_DFX2_RCVR_HOLD_STS_OFF	9
#define LINK_DFX2_RCVR_HOLD_STS_MSK	(0x1 << LINK_DFX2_RCVR_HOLD_STS_OFF)
#define LINK_DFX2_SEND_HOLD_STS_OFF	10
#define LINK_DFX2_SEND_HOLD_STS_MSK	(0x1 << LINK_DFX2_SEND_HOLD_STS_OFF)
#define PHY_CTRL_RDY_MSK		(PORT_BASE + 0x2b0)
#define PHYCTRL_NOT_RDY_MSK		(PORT_BASE + 0x2b4)
#define PHYCTRL_DWS_RESET_MSK		(PORT_BASE + 0x2b8)
#define PHYCTRL_PHY_ENA_MSK		(PORT_BASE + 0x2bc)
#define SL_RX_BCAST_CHK_MSK		(PORT_BASE + 0x2c0)
#define PHYCTRL_OOB_RESTART_MSK		(PORT_BASE + 0x2c4)
#define DMA_TX_STATUS			(PORT_BASE + 0x2d0)
#define DMA_TX_STATUS_BUSY_OFF		0
#define DMA_TX_STATUS_BUSY_MSK		(0x1 << DMA_TX_STATUS_BUSY_OFF)
#define DMA_RX_STATUS			(PORT_BASE + 0x2e8)
#define DMA_RX_STATUS_BUSY_OFF		0
#define DMA_RX_STATUS_BUSY_MSK		(0x1 << DMA_RX_STATUS_BUSY_OFF)

/* AXI master configuration registers */
#define AXI_CFG				(0x5100)
#define AM_CFG_MAX_TRANS		(0x5010)
#define AM_CFG_SINGLE_PORT_MAX_TRANS	(0x5014)

#define AXI_MASTER_CFG_BASE		(0x5000)
#define AM_CTRL_GLOBAL			(0x0)
#define AM_CURR_TRANS_RETURN		(0x150)

/* HW dma
structures */
/* Delivery queue header */
/* dw0 */
#define CMD_HDR_ABORT_FLAG_OFF		0
#define CMD_HDR_ABORT_FLAG_MSK		(0x3 << CMD_HDR_ABORT_FLAG_OFF)
#define CMD_HDR_ABORT_DEVICE_TYPE_OFF	2
#define CMD_HDR_ABORT_DEVICE_TYPE_MSK	(0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
#define CMD_HDR_RESP_REPORT_OFF		5
#define CMD_HDR_RESP_REPORT_MSK		(0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF		6
#define CMD_HDR_TLR_CTRL_MSK		(0x3 << CMD_HDR_TLR_CTRL_OFF)
#define CMD_HDR_PORT_OFF		18
#define CMD_HDR_PORT_MSK		(0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF		27
#define CMD_HDR_PRIORITY_MSK		(0x1 << CMD_HDR_PRIORITY_OFF)
#define CMD_HDR_CMD_OFF			29
#define CMD_HDR_CMD_MSK			(0x7 << CMD_HDR_CMD_OFF)
/* dw1 */
#define CMD_HDR_DIR_OFF			5
#define CMD_HDR_DIR_MSK			(0x3 << CMD_HDR_DIR_OFF)
#define CMD_HDR_RESET_OFF		7
#define CMD_HDR_RESET_MSK		(0x1 << CMD_HDR_RESET_OFF)
#define CMD_HDR_VDTL_OFF		10
#define CMD_HDR_VDTL_MSK		(0x1 << CMD_HDR_VDTL_OFF)
#define CMD_HDR_FRAME_TYPE_OFF		11
#define CMD_HDR_FRAME_TYPE_MSK		(0x1f << CMD_HDR_FRAME_TYPE_OFF)
#define CMD_HDR_DEV_ID_OFF		16
#define CMD_HDR_DEV_ID_MSK		(0xffff << CMD_HDR_DEV_ID_OFF)
/* dw2 */
#define CMD_HDR_CFL_OFF			0
#define CMD_HDR_CFL_MSK			(0x1ff << CMD_HDR_CFL_OFF)
#define CMD_HDR_NCQ_TAG_OFF		10
#define CMD_HDR_NCQ_TAG_MSK		(0x1f << CMD_HDR_NCQ_TAG_OFF)
#define CMD_HDR_MRFL_OFF		15
#define CMD_HDR_MRFL_MSK		(0x1ff << CMD_HDR_MRFL_OFF)
#define CMD_HDR_SG_MOD_OFF		24
#define CMD_HDR_SG_MOD_MSK		(0x3 << CMD_HDR_SG_MOD_OFF)
#define CMD_HDR_FIRST_BURST_OFF		26
/*
 * Fix: this mask was previously built from CMD_HDR_SG_MOD_OFF (bit 24),
 * an apparent copy-paste slip from the SG_MOD pair above; the FIRST_BURST
 * field lives at bit 26 per CMD_HDR_FIRST_BURST_OFF, so the mask now uses
 * its own offset.
 */
#define CMD_HDR_FIRST_BURST_MSK		(0x1 << CMD_HDR_FIRST_BURST_OFF)
/* dw3 */
#define CMD_HDR_IPTT_OFF		0
#define CMD_HDR_IPTT_MSK		(0xffff << CMD_HDR_IPTT_OFF)
/* dw6 */
#define CMD_HDR_DIF_SGL_LEN_OFF		0
#define CMD_HDR_DIF_SGL_LEN_MSK		(0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
#define CMD_HDR_DATA_SGL_LEN_OFF	16
#define CMD_HDR_DATA_SGL_LEN_MSK	(0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
#define CMD_HDR_ABORT_IPTT_OFF		16
#define CMD_HDR_ABORT_IPTT_MSK		(0xffff << CMD_HDR_ABORT_IPTT_OFF)

/* Completion header */
/* dw0 */
#define CMPLT_HDR_ERR_PHASE_OFF	2
#define CMPLT_HDR_ERR_PHASE_MSK	(0xff << CMPLT_HDR_ERR_PHASE_OFF)
#define CMPLT_HDR_RSPNS_XFRD_OFF	10
#define CMPLT_HDR_RSPNS_XFRD_MSK	(0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
#define CMPLT_HDR_ERX_OFF		12
#define CMPLT_HDR_ERX_MSK		(0x1 << CMPLT_HDR_ERX_OFF)
#define CMPLT_HDR_ABORT_STAT_OFF	13
#define CMPLT_HDR_ABORT_STAT_MSK	(0x7 << CMPLT_HDR_ABORT_STAT_OFF)
/* abort_stat */
#define STAT_IO_NOT_VALID		0x1
#define STAT_IO_NO_DEVICE		0x2
#define STAT_IO_COMPLETE		0x3
#define STAT_IO_ABORTED			0x4
/* dw1 */
#define CMPLT_HDR_IPTT_OFF		0
#define CMPLT_HDR_IPTT_MSK		(0xffff << CMPLT_HDR_IPTT_OFF)
#define CMPLT_HDR_DEV_ID_OFF		16
#define CMPLT_HDR_DEV_ID_MSK		(0xffff << CMPLT_HDR_DEV_ID_OFF)

/* ITCT header */
/* qw0 */
#define ITCT_HDR_DEV_TYPE_OFF		0
#define ITCT_HDR_DEV_TYPE_MSK		(0x3 << ITCT_HDR_DEV_TYPE_OFF)
#define ITCT_HDR_VALID_OFF		2
#define ITCT_HDR_VALID_MSK		(0x1 << ITCT_HDR_VALID_OFF)
#define ITCT_HDR_MCR_OFF		5
#define ITCT_HDR_MCR_MSK		(0xf << ITCT_HDR_MCR_OFF)
#define ITCT_HDR_VLN_OFF		9
#define ITCT_HDR_VLN_MSK		(0xf << ITCT_HDR_VLN_OFF)
#define ITCT_HDR_SMP_TIMEOUT_OFF	16
#define ITCT_HDR_SMP_TIMEOUT_8US	1
#define ITCT_HDR_SMP_TIMEOUT		(ITCT_HDR_SMP_TIMEOUT_8US * \
					 250) /* 2ms */
#define ITCT_HDR_AWT_CONTINUE_OFF	25
#define ITCT_HDR_PORT_ID_OFF		28
#define ITCT_HDR_PORT_ID_MSK		(0xf << ITCT_HDR_PORT_ID_OFF)
/* qw2 */
#define ITCT_HDR_INLT_OFF		0
#define ITCT_HDR_INLT_MSK		(0xffffULL << ITCT_HDR_INLT_OFF)
#define ITCT_HDR_BITLT_OFF		16
#define ITCT_HDR_BITLT_MSK		(0xffffULL << ITCT_HDR_BITLT_OFF)
#define ITCT_HDR_MCTLT_OFF		32
#define ITCT_HDR_MCTLT_MSK		(0xffffULL <<
ITCT_HDR_MCTLT_OFF)
#define ITCT_HDR_RTOLT_OFF		48
#define ITCT_HDR_RTOLT_MSK		(0xffffULL << ITCT_HDR_RTOLT_OFF)

#define HISI_SAS_FATAL_INT_NR	2

/* Completion queue entry layout for v2 hw; decoded via the CMPLT_HDR_* masks */
struct hisi_sas_complete_v2_hdr {
	__le32 dw0;
	__le32 dw1;
	__le32 act;
	__le32 dw3;
};

/*
 * Per-slot error record written by the hardware; each field is indexed by
 * the matching *_ERR_BASE-relative error-code enum below.
 */
struct hisi_sas_err_record_v2 {
	/* dw0 */
	__le32 trans_tx_fail_type;

	/* dw1 */
	__le32 trans_rx_fail_type;

	/* dw2 */
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;

	/* dw3 */
	__le32 dma_rx_err_type;
};

/* Interrupt vector classes for a phy */
enum {
	HISI_SAS_PHY_PHY_UPDOWN,
	HISI_SAS_PHY_CHNL_INT,
	HISI_SAS_PHY_INT_NR
};

/*
 * Hardware error codes, grouped by which dword of the error record they
 * come from.  The numeric values match what the hardware reports — do not
 * reorder or renumber.
 */
enum {
	TRANS_TX_FAIL_BASE = 0x0, /* dw0 */
	TRANS_RX_FAIL_BASE = 0x20, /* dw1 */
	DMA_TX_ERR_BASE = 0x40, /* dw2 bit 15-0 */
	SIPC_RX_ERR_BASE = 0x50, /* dw2 bit 31-16*/
	DMA_RX_ERR_BASE = 0x60, /* dw3 */

	/* trans tx*/
	TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */
	TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */
	TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */
	TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */
	TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */
	RESERVED0, /* 0x5 */
	TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */
	TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */
	TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */
	TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */
	TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */
	TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */
	TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */
	TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */
	TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */
	TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */
	TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */
	TRANS_TX_ERR_FRAME_TXED, /* 0x11 */
	TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */
	TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */
	TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */
	TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */
	TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp*/
	TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */
	TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */
	TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */
	TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp*/
	TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp*/
	/*IO_TX_ERR_WITH_R_ERR_RECEVIED, [> 0x1b for sata/stp<] */
	TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */
	/*IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */
	TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */
	TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */
	/*IO_TX_ERR_WITH_SYNC_RXD, [> 0x1e <] for sata/stp */
	TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */

	/* trans rx */
	TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x20 */
	TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x21 for sata/stp */
	TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x22 for ssp/smp */
	/*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x22 <] for sata/stp */
	TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x23 for sata/stp */
	TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x24 for sata/stp */
	TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x25 for smp */
	/*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x25 <] for sata/stp */
	TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x26 for sata/stp*/
	TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x27 */
	TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x28 */
	TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x29 */
	TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x2a */
	RESERVED1, /* 0x2b */
	TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x2c */
	TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x2d */
	TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x2e */
	TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x2f */
	TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x30 for ssp/smp */
	TRANS_RX_ERR_WITH_BAD_HASH, /* 0x31 for ssp */
	/*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x31 <] for sata/stp */
	TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x32 for ssp*/
	/*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x32 <] for sata/stp */
	TRANS_RX_SSP_FRM_LEN_ERR, /* 0x33 for ssp */
	/*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x33 <] for sata */
	RESERVED2, /* 0x34 */
	RESERVED3, /* 0x35 */
	RESERVED4, /* 0x36 */
	RESERVED5, /* 0x37 */
	TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x38 */
	TRANS_RX_SMP_FRM_LEN_ERR, /* 0x39 */
	TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x3a */
	RESERVED6, /* 0x3b */
	RESERVED7, /* 0x3c */
	RESERVED8, /* 0x3d */
	RESERVED9, /* 0x3e */
	TRANS_RX_R_ERR, /* 0x3f */

	/* dma tx */
	DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x40 */
	DMA_TX_DIF_APP_ERR, /* 0x41 */
	DMA_TX_DIF_RPP_ERR, /* 0x42 */
	DMA_TX_DATA_SGL_OVERFLOW, /* 0x43 */
	DMA_TX_DIF_SGL_OVERFLOW, /* 0x44 */
	DMA_TX_UNEXP_XFER_ERR, /* 0x45 */
	DMA_TX_UNEXP_RETRANS_ERR, /* 0x46 */
	DMA_TX_XFER_LEN_OVERFLOW, /* 0x47 */
	DMA_TX_XFER_OFFSET_ERR, /* 0x48 */
	DMA_TX_RAM_ECC_ERR, /* 0x49 */
	DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x4a */
	DMA_TX_MAX_ERR_CODE,

	/* sipc rx */
	SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x50 */
	SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x51 */
	SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x52 */
	SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x53 */
	SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x54 */
	SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x55 */
	SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x56 */
	SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x57 */
	SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x58 */
	SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x59 */
	SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x5a */
	SIPC_RX_MAX_ERR_CODE,

	/* dma rx */
	DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x60 */
	DMA_RX_DIF_APP_ERR, /* 0x61 */
	DMA_RX_DIF_RPP_ERR, /* 0x62 */
	DMA_RX_DATA_SGL_OVERFLOW, /* 0x63 */
	DMA_RX_DIF_SGL_OVERFLOW, /* 0x64 */
	DMA_RX_DATA_LEN_OVERFLOW, /* 0x65 */
	DMA_RX_DATA_LEN_UNDERFLOW, /* 0x66 */
	DMA_RX_DATA_OFFSET_ERR, /*
0x67 */
	RESERVED10, /* 0x68 */
	DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x69 */
	DMA_RX_RESP_BUF_OVERFLOW, /* 0x6a */
	DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x6b */
	DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x6c */
	DMA_RX_UNEXP_RDFRAME_ERR, /* 0x6d */
	DMA_RX_PIO_DATA_LEN_ERR, /* 0x6e */
	DMA_RX_RDSETUP_STATUS_ERR, /* 0x6f */
	DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x70 */
	DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x71 */
	DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x72 */
	DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x73 */
	DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x74 */
	DMA_RX_RDSETUP_OFFSET_ERR, /* 0x75 */
	DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x76 */
	DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x77 */
	DMA_RX_RAM_ECC_ERR, /* 0x78 */
	DMA_RX_UNKNOWN_FRM_ERR, /* 0x79 */
	DMA_RX_MAX_ERR_CODE,
};

#define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096
/* Number of SATA devices supportable under the IPTT partitioning quirk */
#define HISI_MAX_SATA_SUPPORT_V2_HW	(HISI_SAS_COMMAND_ENTRIES_V2_HW/64 - 1)

/* Data direction values for the command header (dw1) */
#define DIR_NO_DATA 0
#define DIR_TO_INI 1
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3

/* Classify a completion error phase as TX- or RX-side */
#define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \
		err_phase == 0x4 || err_phase == 0x8 ||\
		err_phase == 0x6 || err_phase == 0xa)
#define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \
		err_phase == 0x20 || err_phase == 0x40)

static void link_timeout_disable_link(unsigned long data);

/* Read a 32-bit global register at byte offset @off */
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
	void __iomem *regs = hisi_hba->regs + off;

	return readl(regs);
}

/* As hisi_sas_read32(), but without a read memory barrier */
static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
{
	void __iomem *regs = hisi_hba->regs + off;

	return readl_relaxed(regs);
}

/* Write a 32-bit global register at byte offset @off */
static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + off;

	writel(val, regs);
}

/* Write a per-phy register: each phy owns a 0x400-byte register window */
static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
				 u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	writel(val, regs);
}

/* Read a per-phy register: each phy owns a 0x400-byte register window */
static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
			       int phy_no, u32 off)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	return readl(regs);
}

/*
 * Allocate an IPTT (slot index) for @device, honouring the STP-link SoC
 * bug workaround: SAS/SMP devices get odd IPTTs starting at 1; SATA
 * devices get even IPTTs from the 128-entry window reserved for their
 * sata_idx.  Returns 0 and stores the index in *slot_idx on success,
 * -EINVAL for an out-of-range sata_idx, -SAS_QUEUE_FULL if exhausted.
 *
 * This function needs to be protected from pre-emption.
 */
static int
slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
			     struct domain_device *device)
{
	int sata_dev = dev_is_sata(device);
	void *bitmap = hisi_hba->slot_index_tags;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	int sata_idx = sas_dev->sata_idx;
	int start, end;

	if (!sata_dev) {
		/*
		 * STP link SoC bug workaround: index starts from 1.
		 * additionally, we can only allocate odd IPTT(1~4095)
		 * for SAS/SMP device.
		 */
		start = 1;
		end = hisi_hba->slot_index_count;
	} else {
		if (sata_idx >= HISI_MAX_SATA_SUPPORT_V2_HW)
			return -EINVAL;

		/*
		 * For SATA device: allocate even IPTT in this interval
		 * [64*(sata_idx+1), 64*(sata_idx+2)], then each SATA device
		 * own 32 IPTTs. IPTT 0 shall not be used due to STP link
		 * SoC bug workaround. So we ignore the first 32 even IPTTs.
		 */
		start = 64 * (sata_idx + 1);
		end = 64 * (sata_idx + 2);
	}

	while (1) {
		start = find_next_zero_bit(bitmap,
					hisi_hba->slot_index_count, start);
		if (start >= end)
			return -SAS_QUEUE_FULL;
		/*
		 * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
		 */
		if (sata_dev ^ (start & 1))
			break;
		start++;
	}

	set_bit(start, bitmap);
	*slot_idx = start;
	return 0;
}

/*
 * Claim a free SATA device index from the per-HBA bitmap.  Returns true
 * and stores the index in *idx on success, false when all indices are
 * in use.
 */
static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
{
	unsigned int index;
	struct device *dev = hisi_hba->dev;
	void *bitmap = hisi_hba->sata_dev_bitmap;

	index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW);
	if (index >= HISI_MAX_SATA_SUPPORT_V2_HW) {
		dev_warn(dev, "alloc sata index failed, index=%d\n", index);
		return false;
	}

	set_bit(index, bitmap);
	*idx = index;
	return true;
}


/*
 * Allocate a hisi_sas_device slot for @device under hisi_hba->lock.
 * SoC quirk: SATA devices must take an even device id (bit0 == 0), and
 * also need a sata_idx from sata_index_alloc_v2_hw() first.  Returns the
 * initialised device, or NULL if no id (or no sata index) is available.
 */
static struct
hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = device->port->ha->lldd_ha;
	struct hisi_sas_device *sas_dev = NULL;
	int i, sata_dev = dev_is_sata(device);
	int sata_idx = -1;

	spin_lock(&hisi_hba->lock);

	if (sata_dev)
		if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx))
			goto out;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		/*
		 * SATA device id bit0 should be 0
		 */
		if (sata_dev && (i & 1))
			continue;
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->sata_idx = sata_idx;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
	}

out:
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

/* Set the DC-optimisation bit in a phy's PHY_CFG register */
static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg &= ~PHY_CFG_DC_OPT_MSK;
	cfg |= 1 << PHY_CFG_DC_OPT_OFF;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

/*
 * Program the IDENTIFY address frame a phy transmits: build the frame,
 * then write it dword-by-dword (byte-swapped for the hardware's expected
 * endianness) into TX_ID_DWORD0..5.
 */
static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct sas_identify_frame identify_frame;
	u32 *identify_buffer;

	memset(&identify_frame, 0, sizeof(identify_frame));
	identify_frame.dev_type = SAS_END_DEVICE;
	identify_frame.frame_type = 0;
	identify_frame._un1 = 1;
	identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
	identify_frame.target_bits = SAS_PROTOCOL_NONE;
	memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	identify_frame.phy_id = phy_no;
	identify_buffer = (u32 *)(&identify_frame);

	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
			__swab32(identify_buffer[0]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
			__swab32(identify_buffer[1]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
			__swab32(identify_buffer[2]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
			__swab32(identify_buffer[3]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
			__swab32(identify_buffer[4]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
			__swab32(identify_buffer[5]));
}

/*
 * Fill the ITCT (I_T context table) entry for @sas_dev: device type,
 * link rate, port id and SAS address in qw0/qw1, plus (for non-SATA
 * devices) the nexus/timeout limits in qw2.
 */
static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_device *sas_dev)
{
	struct domain_device *device = sas_dev->sas_device;
	struct device *dev = hisi_hba->dev;
	u64 qw0, device_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
	struct domain_device *parent_dev = device->parent;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);

	memset(itct, 0, sizeof(*itct));

	/* qw0 */
	qw0 = 0;
	switch (sas_dev->dev_type) {
	case SAS_END_DEVICE:
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PENDING:
		/* SATA behind an expander is STP; direct-attached is SATA */
		if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
		else
			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
		break;
	default:
		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
			 sas_dev->dev_type);
	}

	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
		(device->linkrate << ITCT_HDR_MCR_OFF) |
		(1 << ITCT_HDR_VLN_OFF) |
		(ITCT_HDR_SMP_TIMEOUT << ITCT_HDR_SMP_TIMEOUT_OFF) |
		(1 << ITCT_HDR_AWT_CONTINUE_OFF) |
		(port->id << ITCT_HDR_PORT_ID_OFF));
	itct->qw0 = cpu_to_le64(qw0);

	/* qw1 */
	memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
	itct->sas_addr = __swab64(itct->sas_addr);

	/* qw2 */
	if (!dev_is_sata(device))
		itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
					(0x1ULL << ITCT_HDR_BITLT_OFF) |
					(0x32ULL << ITCT_HDR_MCTLT_OFF) |
					(0x1ULL << ITCT_HDR_RTOLT_OFF));
}

/*
 * Release @sas_dev's hardware context: return its sata_idx (SoC bug
 * workaround bookkeeping), then ask the hardware to clear the ITCT entry
 * via ITCT_CLR and wait (up to two 10us-spaced attempts) for the
 * ITC-interrupt bit in ENT_INT_SRC3 to confirm completion.
 */
static void free_device_v2_hw(struct hisi_hba *hisi_hba,
			      struct hisi_sas_device *sas_dev)
{
	u64 dev_id = sas_dev->device_id;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	int i;

	/* SoC bug workaround */
	if (dev_is_sata(sas_dev->sas_device))
		clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);

	/* clear the itct interrupt state */
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

	/* clear the itct int*/
	for (i = 0; i < 2; i++) {
		/* clear the itct table*/
		reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
		reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
		hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

		udelay(10);
		reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
		if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
			dev_dbg(dev, "got clear ITCT done interrupt\n");

			/* invalidate the itct state*/
			memset(itct, 0, sizeof(struct hisi_sas_itct));
			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
					 ENT_INT_SRC3_ITC_INT_MSK);

			/* clear the itct */
			hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
			dev_dbg(dev, "clear ITCT ok\n");
			break;
		}
	}
}

/*
 * Hard-reset the SAS controller: quiesce queues and phys, wait for DMA
 * and the AXI bus to go idle (1s timeouts), then reset either via the
 * ACPI _RST method or via the syscon reset/clock registers.  Returns 0
 * on success or -EIO on any timeout/verification failure.
 */
static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
{
	int i, reset_val;
	u32 val;
	unsigned long end_time;
	struct device *dev = hisi_hba->dev;

	/* The mask needs to be set depending on the number of phys */
	if (hisi_hba->n_phy == 9)
		reset_val = 0x1fffff;
	else
		reset_val = 0x7ffff;

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	/* Disable all of the PHYs */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG);

		/*
		 * NOTE(review): PHY_CTRL_RESET_MSK and PHY_CFG_ENA_MSK are
		 * both bit 0, so this clears the PHY enable bit of PHY_CFG —
		 * presumably intentional; confirm against the hw manual.
		 */
		phy_cfg &= ~PHY_CTRL_RESET_MSK;
		hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg);
	}
	udelay(50);

	/* Ensure DMA tx & rx idle */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		u32 dma_tx_status, dma_rx_status;

		end_time = jiffies + msecs_to_jiffies(1000);

		while (1) {
			dma_tx_status = hisi_sas_phy_read32(hisi_hba, i,
							    DMA_TX_STATUS);
			dma_rx_status = hisi_sas_phy_read32(hisi_hba, i,
							    DMA_RX_STATUS);

			if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) &&
				!(dma_rx_status & DMA_RX_STATUS_BUSY_MSK))
				break;

			msleep(20);
			if (time_after(jiffies, end_time))
				return -EIO;
		}
	}

	/* Ensure axi bus idle */
	end_time = jiffies + msecs_to_jiffies(1000);
	while (1) {
		u32 axi_status =
			hisi_sas_read32(hisi_hba, AXI_CFG);

		if (axi_status == 0)
			break;

		msleep(20);
		if (time_after(jiffies, end_time))
			return -EIO;
	}

	if (ACPI_HANDLE(dev)) {
		acpi_status s;

		s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
		if (ACPI_FAILURE(s)) {
			dev_err(dev, "Reset failed\n");
			return -EIO;
		}
	} else if (hisi_hba->ctrl) {
		/* reset and disable clock*/
		regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
			     reset_val);
		regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
			     reset_val);
		msleep(1);
		regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
		if (reset_val != (val & reset_val)) {
			dev_err(dev, "SAS reset fail.\n");
			return -EIO;
		}

		/* De-reset and enable clock*/
		regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
			     reset_val);
		regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
			     reset_val);
		msleep(1);
		regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg,
			    &val);
		if (val & reset_val) {
			dev_err(dev, "SAS de-reset fail.\n");
			return -EIO;
		}
	} else
		dev_warn(dev, "no reset method\n");

	return 0;
}

/* This function needs to be called after resetting SAS controller.
 */
/* Refuse new STP connections on every phy (records them in
 * reject_stp_links_msk so they can be re-enabled later).
 */
static void phys_reject_stp_links_v2_hw(struct hisi_hba *hisi_hba)
{
	u32 cfg;
	int phy_no;

	hisi_hba->reject_stp_links_msk = (1 << hisi_hba->n_phy) - 1;
	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		cfg = hisi_sas_phy_read32(hisi_hba, phy_no, CON_CONTROL);
		if (!(cfg & CON_CONTROL_CFG_OPEN_ACC_STP_MSK))
			continue;

		cfg &= ~CON_CONTROL_CFG_OPEN_ACC_STP_MSK;
		hisi_sas_phy_write32(hisi_hba, phy_no, CON_CONTROL, cfg);
	}
}

/* Re-enable STP connection acceptance on phys previously rejected,
 * but only once the phy's DMA TX DFX1 IPTT field indicates it is safe.
 */
static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba)
{
	int phy_no;
	u32 dma_tx_dfx1;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		if (!(hisi_hba->reject_stp_links_msk & BIT(phy_no)))
			continue;

		dma_tx_dfx1 = hisi_sas_phy_read32(hisi_hba, phy_no,
				DMA_TX_DFX1);
		if (dma_tx_dfx1 & DMA_TX_DFX1_IPTT_MSK) {
			u32 cfg = hisi_sas_phy_read32(hisi_hba,
				phy_no, CON_CONTROL);

			cfg |= CON_CONTROL_CFG_OPEN_ACC_STP_MSK;
			hisi_sas_phy_write32(hisi_hba, phy_no,
				CON_CONTROL, cfg);
			clear_bit(phy_no, &hisi_hba->reject_stp_links_msk);
		}
	}
}

/*
 * Program every global, per-phy and per-queue register to its working
 * value, and point the HW at the DMA tables (delivery/completion
 * queues, ITCT, IOST, breakpoint and initial-FIS buffers).
 * Must run after reset_hw_v2_hw().
 */
static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i;

	/* Global registers init */

	/* Deal with am-max-transmissions quirk */
	if (device_property_present(dev, "hip06-sas-v2-quirk-amt")) {
		hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020);
		hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS,
				 0x2020);
	} /* Else, use defaults -> do nothing */

	/* one enable bit per delivery queue */
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000);
	hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x0);
	hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF);
	hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1);
	hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4);
	hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32);
	hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1);
	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
	hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1);
	hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0xc);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x60);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x3);
	hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0);
	/* write-1-to-clear all pending interrupt sources */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);

	hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
		hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908);
		hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
		hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2);
		hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x8);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
		hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
		/* alternate PHY_CTRL tuning for boards with a 66MHz refclk */
		if (hisi_hba->refclk_frequency_mhz == 66)
			hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
		/* else, do nothing -> leave it how you found it */
	}

	/* each queue's register bank is 0x14 bytes wide */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));
}

/*
 * Timer callback (legacy unsigned-long timer API): re-enable the first
 * phy found with CON_CONTROL bit0 clear, then re-arm the companion
 * disable callback 900ms later. Pairs with link_timeout_disable_link.
 */
static void link_timeout_enable_link(unsigned long data)
{
	struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
	int i, reg_val;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		/* skip phys currently rejecting STP links */
		if (hisi_hba->reject_stp_links_msk & BIT(i))
			continue;

		reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL);
		if (!(reg_val & BIT(0))) {
			hisi_sas_phy_write32(hisi_hba, i,
					CON_CONTROL, 0x7);
			break;
		}
	}

	hisi_hba->timer.function = link_timeout_disable_link;
	mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
}

/*
 * Timer callback: disable (CON_CONTROL = 0x6) the first up phy not in
 * the reject mask, then re-arm link_timeout_enable_link 100ms later.
 */
static void link_timeout_disable_link(unsigned long data)
{
	struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
	int i, reg_val;

	reg_val = hisi_sas_read32(hisi_hba, PHY_STATE);
	for (i = 0; i < hisi_hba->n_phy && reg_val; i++) {
		if (hisi_hba->reject_stp_links_msk & BIT(i))
			continue;

		if (reg_val & BIT(i)) {
			hisi_sas_phy_write32(hisi_hba, i,
					CON_CONTROL, 0x6);
			break;
		}
	}

	hisi_hba->timer.function = link_timeout_enable_link;
	mod_timer(&hisi_hba->timer, jiffies +
msecs_to_jiffies(100));
}

/* Arm the link enable/disable ping-pong timer (board quirk); first
 * fires the disable callback after 1s.
 */
static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
{
	hisi_hba->timer.data = (unsigned long)hisi_hba;
	hisi_hba->timer.function = link_timeout_disable_link;
	hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
	add_timer(&hisi_hba->timer);
}

/* Full HW bring-up: reset, settle 100ms, then program all registers. */
static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = reset_hw_v2_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
		return rc;
	}

	msleep(100);
	init_reg_v2_hw(hisi_hba);

	return 0;
}

/* Set the enable bit in the phy's PHY_CFG register. */
static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg |= PHY_CFG_ENA_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

/* True if PHY_CONTEXT marks this phy as connected to a SATA device. */
static bool is_sata_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 context;

	context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
	if (context & (1 << phy_no))
		return true;

	return false;
}

/* True if the phy's DMA TX FIFO is empty (DMA_TX_DFX1 bit16 clear). */
static bool tx_fifo_is_empty_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 dfx_val;

	dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1);

	if (dfx_val & BIT(16))
		return false;

	return true;
}

/* Poll (up to 1000 x 10us) until the AXI bus and the phy's DMA TX DFX
 * registers all report idle; logs and returns false on timeout.
 */
static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	int i, max_loop = 1000;
	struct device *dev = hisi_hba->dev;
	u32 status, axi_status, dfx_val, dfx_tx_val;

	for (i = 0; i < max_loop; i++) {
		status = hisi_sas_read32_relaxed(hisi_hba,
			AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN);

		axi_status = hisi_sas_read32(hisi_hba, AXI_CFG);
		dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1);
		dfx_tx_val = hisi_sas_phy_read32(hisi_hba,
			phy_no, DMA_TX_FIFO_DFX0);

		if ((status == 0x3) && (axi_status == 0x0) &&
		    (dfx_val & BIT(20)) && (dfx_tx_val & BIT(10)))
			return true;
		udelay(10);
	}
	dev_err(dev, "bus is not idle phy%d, axi150:0x%x axi100:0x%x port204:0x%x port240:0x%x\n",
			phy_no, status, axi_status,
			dfx_val, dfx_tx_val);
	return false;
}

/* Poll (up to 1000 x 10us) until the phy's link/DMA state shows the
 * in-flight IO has finished; logs and returns false on timeout.
 */
static bool wait_io_done_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	int i, max_loop = 1000;
	struct device *dev = hisi_hba->dev;
	u32 status, tx_dfx0;

	for (i = 0; i < max_loop; i++) {
		status = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2);
		status = (status & 0x3fc0) >> 6;

		if (status != 0x1)
			return true;

		tx_dfx0 = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX0);
		if ((tx_dfx0 & 0x1ff) == 0x2)
			return true;
		udelay(10);
	}
	dev_err(dev, "IO not done phy%d, port264:0x%x port200:0x%x\n",
			phy_no, status, tx_dfx0);
	return false;
}

/* Decide whether a (SATA) phy may be disabled right now: immediately
 * OK if the TX FIFO is empty, otherwise require AXI idle and IO done.
 */
static bool allowed_disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	if (tx_fifo_is_empty_v2_hw(hisi_hba, phy_no))
		return true;

	if (!axi_bus_is_idle_v2_hw(hisi_hba, phy_no))
		return false;

	if (!wait_io_done_v2_hw(hisi_hba, phy_no))
		return false;

	return true;
}


/*
 * Disable a phy safely: gate the AXI bus first, handle the SATA case
 * (fall back to a full controller reset via the workqueue when the phy
 * cannot be disabled cleanly), optionally send a break to flush the TX
 * FIFO, clear PHY_CFG_ENA, then re-open the AXI bus.
 */
static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg, axi_val, dfx0_val, txid_auto;
	struct device *dev = hisi_hba->dev;

	/* Close axi bus. */
	axi_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
				AM_CTRL_GLOBAL);
	axi_val |= 0x1;
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
		AM_CTRL_GLOBAL, axi_val);

	if (is_sata_phy_v2_hw(hisi_hba, phy_no)) {
		if (allowed_disable_phy_v2_hw(hisi_hba, phy_no))
			goto do_disable;

		/* Reset host controller.
 */
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		return;
	}

	dfx0_val = hisi_sas_phy_read32(hisi_hba, phy_no, PORT_DFX0);
	dfx0_val = (dfx0_val & 0x1fc0) >> 6;
	if (dfx0_val != 0x4)
		goto do_disable;

	if (!tx_fifo_is_empty_v2_hw(hisi_hba, phy_no)) {
		/* send a break frame to flush the pending TX FIFO */
		dev_warn(dev, "phy%d, wait tx fifo need send break\n",
			phy_no);
		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no,
					TXID_AUTO);
		txid_auto |= TXID_AUTO_CTB_MSK;
		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
					txid_auto);
	}

do_disable:
	cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
	cfg &= ~PHY_CFG_ENA_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);

	/* Open axi bus. */
	axi_val &= ~0x1;
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
		AM_CTRL_GLOBAL, axi_val);
}

/* Configure the identify frame and optional mode, then enable the phy. */
static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v2_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v2_hw(hisi_hba, phy_no);
	enable_phy_v2_hw(hisi_hba, phy_no);
}

/* Thin wrapper matching the start/stop naming pair. */
static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	disable_phy_v2_hw(hisi_hba, phy_no);
}

/* Stop every phy on the controller. */
static void stop_phys_v2_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		stop_phy_v2_hw(hisi_hba, i);
}

/* Stop the phy, request a hard reset on the wire for end devices
 * (TX_HARDRST), wait 100ms, then restart the phy.
 */
static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	u32 txid_auto;

	stop_phy_v2_hw(hisi_hba, phy_no);
	if (phy->identify.device_type == SAS_END_DEVICE) {
		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
					txid_auto | TX_HARDRST_MSK);
	}
	msleep(100);
	start_phy_v2_hw(hisi_hba, phy_no);
}

/* Start every phy on the controller. */
static void start_phys_v2_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		start_phy_v2_hw(hisi_hba, i);
}

/* libsas hook: phy initialisation is just starting all phys. */
static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
{
	start_phys_v2_hw(hisi_hba);
}

/* Pulse SL_CONTROL's NOTIFY enable for ~1ms to emit a NOTIFY primitive. */
static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 sl_control;

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
	msleep(1);
	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}

/* v2 HW supports up to 12G link rate. */
static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
{
	return SAS_LINK_RATE_12_0_GBPS;
}

/*
 * Apply a min/max link rate request: build the rate mask (one bit per
 * rate step, stride 2) into PROG_PHY_LINK_RATE's low byte, then hard
 * reset the phy so the new rates take effect. A request with both ends
 * specified (neither UNKNOWN) is ignored.
 */
static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
		struct sas_phy_linkrates *r)
{
	u32 prog_phy_link_rate =
		hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;
	enum sas_linkrate min, max;
	u32 rate_mask = 0;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	/* rebase so 1.5G == 0; NOTE(review): only max is used below,
	 * min is rebased but not applied to the mask — confirm intended.
	 */
	min -= SAS_LINK_RATE_1_5_GBPS;
	max -= SAS_LINK_RATE_1_5_GBPS;

	for (i = 0; i <= max; i++)
		rate_mask |= 1 << (i * 2);

	prog_phy_link_rate &= ~0xff;
	prog_phy_link_rate |= rate_mask;

	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
			prog_phy_link_rate);

	phy_hard_reset_v2_hw(hisi_hba, phy_no);
}

static int
get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
{
	int i, bitmap = 0;
	u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	/* phys 0..7 report their port number as 4-bit fields in
	 * PHY_PORT_NUM_MA; phy 8 (9-phy parts) lives in PORT_STATE.
	 */
	for (i = 0; i < (hisi_hba->n_phy < 9 ? hisi_hba->n_phy : 8); i++)
		if (phy_state & 1 << i)
			if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
				bitmap |= 1 << i;

	if (hisi_hba->n_phy == 9) {
		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);

		if (phy_state & 1 << 8)
			if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
			     PORT_STATE_PHY8_PORT_NUM_OFF) == port_id)
				/* NOTE(review): bit 9 is set for phy index 8
				 * (bit 8 would be expected) — confirm this
				 * matches the consumer of the bitmap.
				 */
				bitmap |= 1 << 9;
	}

	return bitmap;
}

/*
 * The callpath to this function and upto writing the write
 * queue pointer should be safe from interruption.
 */
/* Check the delivery queue for space: returns 0 when a slot is free,
 * -EAGAIN when the queue is full (read ptr one ahead of write ptr).
 */
static int
get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
{
	struct device *dev = hisi_hba->dev;
	int queue = dq->id;
	u32 r, w;

	w = dq->wr_point;
	r = hisi_sas_read32_relaxed(hisi_hba,
				DLVRY_Q_0_RD_PTR + (queue * 0x14));
	if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
		/* NOTE(review): message ends with a doubled newline */
		dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
				queue, r, w);
		return -EAGAIN;
	}

	return 0;
}

/* Advance the delivery queue write pointer (wrapping) and publish it
 * to the HW, which starts processing the prepared slot.
 */
static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	int dlvry_queue = dq->slot_prep->dlvry_queue;
	int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;

	dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
			 dq->wr_point);
}

/*
 * Fill the slot's SGE page from the DMA-mapped scatterlist and record
 * the PRD table address and element count in the command header.
 * Returns 0, or -EINVAL if n_elem exceeds the SGE page capacity.
 */
static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot,
			  struct hisi_sas_cmd_hdr *hdr,
			  struct scatterlist *scatter,
			  int n_elem)
{
	struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
	struct device *dev = hisi_hba->dev;
	struct scatterlist *sg;
	int i;

	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			n_elem);
		return -EINVAL;
	}

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &sge_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));

	hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);

	return 0;
}

/*
 * Build the command header for an SMP task: DMA-map the request and
 * response buffers (both must be 4-byte aligned in length) and fill
 * dw0-dw2, the transfer tag and the table/status buffer addresses.
 */
static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req, *sg_resp;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len, resp_len;
	int elem, rc;

	/*
	 * DMA-map SMP request, response buffers
	 */
	/* req */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* resp */
	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out_req;
	}
	resp_len = sg_dma_len(sg_resp);
	/* HW requires dword-aligned request/response lengths */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_resp;
	}

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF));
/* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2 */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	return 0;

err_out_resp:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out_req:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}

/*
 * Build the command header and command table for an SSP task (normal
 * command or TMF). TMFs are sent high priority with no data phase;
 * commands get direction/SGL setup from the scsi_cmnd and the IU
 * (LUN, attributes, CDB) copied after the SSP frame header.
 */
static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot, int is_tmf,
			  struct hisi_sas_tmf_task *tmf)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	int has_data = 0, rc, priority = is_tmf;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (is_tmf) {
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
	hdr->dw1 = cpu_to_le32(dw1);

	/* command frame length in dwords + max response frame length */
	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
	      + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);

	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	/* IU starts right after the SSP frame header in the cmd table */
	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
		sizeof(struct ssp_frame_hdr);

	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	if (!is_tmf) {
		buf_cmd[9] = task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
				task->ssp_task.cmd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* big-endian tag of the task being managed */
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	return 0;
}

/* Error-record dword indices within hisi_sas_err_record_v2 */
#define TRANS_TX_ERR	0
#define TRANS_RX_ERR	1
#define DMA_TX_ERR	2
#define SIPC_RX_ERR	3
#define DMA_RX_ERR	4

/* DMA TX and SIPC RX error types share one dword: low/high 16 bits */
#define DMA_TX_ERR_OFF	0
#define DMA_TX_ERR_MSK	(0xffff << DMA_TX_ERR_OFF)
#define SIPC_RX_ERR_OFF	16
#define SIPC_RX_ERR_MSK (0xffff << SIPC_RX_ERR_OFF)

/* Return the highest-priority TRANS TX error set in err_msk, or -1.
 * The table lists error codes in decreasing priority order.
 */
static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
{
	static const u8 trans_tx_err_code_prio[] = {
		TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS,
		TRANS_TX_ERR_PHY_NOT_ENABLE,
		TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION,
		TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION,
		TRANS_TX_OPEN_CNX_ERR_BY_OTHER,
		RESERVED0,
		TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT,
		TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY,
		TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED,
		TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED,
		TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION,
		TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD,
		TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER,
		TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED,
		TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT,
		TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION,
		TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED,
		TRANS_TX_ERR_WITH_CLOSE_PHYDISALE,
		TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT,
		TRANS_TX_ERR_WITH_CLOSE_COMINIT,
		TRANS_TX_ERR_WITH_BREAK_TIMEOUT,
		TRANS_TX_ERR_WITH_BREAK_REQUEST,
		TRANS_TX_ERR_WITH_BREAK_RECEVIED,
		TRANS_TX_ERR_WITH_CLOSE_TIMEOUT,
		TRANS_TX_ERR_WITH_CLOSE_NORMAL,
		TRANS_TX_ERR_WITH_NAK_RECEVIED,
		TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT,
		TRANS_TX_ERR_WITH_CREDIT_TIMEOUT,
		TRANS_TX_ERR_WITH_IPTT_CONFLICT,
		TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS,
		TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT,
	};
	int index, i;

	/* codes are bit positions relative to TRANS_TX_FAIL_BASE */
	for (i = 0; i < ARRAY_SIZE(trans_tx_err_code_prio); i++) {
		index = trans_tx_err_code_prio[i] - TRANS_TX_FAIL_BASE;
		if (err_msk & (1 << index))
			return trans_tx_err_code_prio[i];
	}
	return -1;
}

/* Return the highest-priority TRANS RX error set in err_msk, or -1. */
static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
{
	static const u8 trans_rx_err_code_prio[] = {
		TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR,
		TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR,
		TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM,
		TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR,
		TRANS_RX_ERR_WITH_RXFIS_CRC_ERR,
		TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN,
		TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP,
		TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN,
		TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE,
		TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT,
		TRANS_RX_ERR_WITH_CLOSE_COMINIT,
		TRANS_RX_ERR_WITH_BREAK_TIMEOUT,
		TRANS_RX_ERR_WITH_BREAK_REQUEST,
		TRANS_RX_ERR_WITH_BREAK_RECEVIED,
		RESERVED1,
		TRANS_RX_ERR_WITH_CLOSE_NORMAL,
		TRANS_RX_ERR_WITH_DATA_LEN0,
		TRANS_RX_ERR_WITH_BAD_HASH,
		TRANS_RX_XRDY_WLEN_ZERO_ERR,
		TRANS_RX_SSP_FRM_LEN_ERR,
		RESERVED2,
		RESERVED3,
		RESERVED4,
		RESERVED5,
		TRANS_RX_ERR_WITH_BAD_FRM_TYPE,
		TRANS_RX_SMP_FRM_LEN_ERR,
		TRANS_RX_SMP_RESP_TIMEOUT_ERR,
		RESERVED6,
		RESERVED7,
		RESERVED8,
		RESERVED9,
		TRANS_RX_R_ERR,
	};
	int index, i;

	for (i = 0; i < ARRAY_SIZE(trans_rx_err_code_prio); i++) {
		index = trans_rx_err_code_prio[i] - TRANS_RX_FAIL_BASE;
		if (err_msk & (1 << index))
			return trans_rx_err_code_prio[i];
	}
	return -1;
}

/* Return the highest-priority DMA TX error set in err_msk, or -1.
 * Only the low 16 bits (DMA_TX_ERR_MSK) of the dword are relevant.
 */
static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
{
	static const u8 dma_tx_err_code_prio[] = {
		DMA_TX_UNEXP_XFER_ERR,
		DMA_TX_UNEXP_RETRANS_ERR,
		DMA_TX_XFER_LEN_OVERFLOW,
		DMA_TX_XFER_OFFSET_ERR,
		DMA_TX_RAM_ECC_ERR,
		DMA_TX_DIF_LEN_ALIGN_ERR,
		DMA_TX_DIF_CRC_ERR,
		DMA_TX_DIF_APP_ERR,
		DMA_TX_DIF_RPP_ERR,
		DMA_TX_DATA_SGL_OVERFLOW,
		DMA_TX_DIF_SGL_OVERFLOW,
	};
	int index, i;

	for (i = 0; i < ARRAY_SIZE(dma_tx_err_code_prio); i++) {
		index = dma_tx_err_code_prio[i] - DMA_TX_ERR_BASE;
		err_msk = err_msk & DMA_TX_ERR_MSK;
		if (err_msk & (1 << index))
			return dma_tx_err_code_prio[i];
	}
	return -1;
}

/* Return the highest-priority SIPC RX error set in err_msk, or -1.
 * SIPC RX errors occupy the high 16 bits of the shared dword.
 */
static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
{
	static const u8 sipc_rx_err_code_prio[] = {
		SIPC_RX_FIS_STATUS_ERR_BIT_VLD,
		SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR,
		SIPC_RX_FIS_STATUS_BSY_BIT_ERR,
		SIPC_RX_WRSETUP_LEN_ODD_ERR,
		SIPC_RX_WRSETUP_LEN_ZERO_ERR,
		SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR,
		SIPC_RX_NCQ_WRSETUP_OFFSET_ERR,
		SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR,
		SIPC_RX_SATA_UNEXP_FIS_ERR,
		SIPC_RX_WRSETUP_ESTATUS_ERR,
		SIPC_RX_DATA_UNDERFLOW_ERR,
	};
	int index, i;

	for (i = 0; i < ARRAY_SIZE(sipc_rx_err_code_prio); i++) {
		index = sipc_rx_err_code_prio[i] - SIPC_RX_ERR_BASE;
		err_msk = err_msk & SIPC_RX_ERR_MSK;
		/* +0x10: SIPC RX bits live in the dword's upper half */
		if (err_msk & (1 << (index + 0x10)))
			return sipc_rx_err_code_prio[i];
	}
	return -1;
}

/* Return the highest-priority DMA RX error set in err_msk, or -1. */
static int parse_dma_rx_err_code_v2_hw(u32 err_msk)
{
	static const u8 dma_rx_err_code_prio[] = {
		DMA_RX_UNKNOWN_FRM_ERR,
		DMA_RX_DATA_LEN_OVERFLOW,
		DMA_RX_DATA_LEN_UNDERFLOW,
		DMA_RX_DATA_OFFSET_ERR,
		RESERVED10,
		DMA_RX_SATA_FRAME_TYPE_ERR,
		DMA_RX_RESP_BUF_OVERFLOW,
		DMA_RX_UNEXP_RETRANS_RESP_ERR,
		DMA_RX_UNEXP_NORM_RESP_ERR,
		DMA_RX_UNEXP_RDFRAME_ERR,
		DMA_RX_PIO_DATA_LEN_ERR,
		DMA_RX_RDSETUP_STATUS_ERR,
		DMA_RX_RDSETUP_STATUS_DRQ_ERR,
		DMA_RX_RDSETUP_STATUS_BSY_ERR,
		DMA_RX_RDSETUP_LEN_ODD_ERR,
		DMA_RX_RDSETUP_LEN_ZERO_ERR,
		DMA_RX_RDSETUP_LEN_OVER_ERR,
		DMA_RX_RDSETUP_OFFSET_ERR,
		DMA_RX_RDSETUP_ACTIVE_ERR,
		DMA_RX_RDSETUP_ESTATUS_ERR,
		DMA_RX_RAM_ECC_ERR,
		DMA_RX_DIF_CRC_ERR,
		DMA_RX_DIF_APP_ERR,
		DMA_RX_DIF_RPP_ERR,
		DMA_RX_DATA_SGL_OVERFLOW,
		DMA_RX_DIF_SGL_OVERFLOW,
	};
	int index, i;

	for (i = 0; i < ARRAY_SIZE(dma_rx_err_code_prio); i++) {
		index = dma_rx_err_code_prio[i] - DMA_RX_ERR_BASE;
		if (err_msk & (1 << index))
			return dma_rx_err_code_prio[i];
	}
	return -1;
}

/* by default, task resp is complete */
static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
			   struct sas_task *task,
			   struct hisi_sas_slot *slot,
			   int err_phase)
{
	struct task_status_struct *ts = &task->task_status;
	struct hisi_sas_err_record_v2 *err_record =
			hisi_sas_status_buf_addr_mem(slot);
	u32
trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type); 1887 u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type); 1888 u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type); 1889 u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type); 1890 u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type); 1891 int error = -1; 1892 1893 if (err_phase == 1) { 1894 /* error in TX phase, the priority of error is: DW2 > DW0 */ 1895 error = parse_dma_tx_err_code_v2_hw(dma_tx_err_type); 1896 if (error == -1) 1897 error = parse_trans_tx_err_code_v2_hw( 1898 trans_tx_fail_type); 1899 } else if (err_phase == 2) { 1900 /* error in RX phase, the priority is: DW1 > DW3 > DW2 */ 1901 error = parse_trans_rx_err_code_v2_hw( 1902 trans_rx_fail_type); 1903 if (error == -1) { 1904 error = parse_dma_rx_err_code_v2_hw( 1905 dma_rx_err_type); 1906 if (error == -1) 1907 error = parse_sipc_rx_err_code_v2_hw( 1908 sipc_rx_err_type); 1909 } 1910 } 1911 1912 switch (task->task_proto) { 1913 case SAS_PROTOCOL_SSP: 1914 { 1915 switch (error) { 1916 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: 1917 { 1918 ts->stat = SAS_OPEN_REJECT; 1919 ts->open_rej_reason = SAS_OREJ_NO_DEST; 1920 break; 1921 } 1922 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: 1923 { 1924 ts->stat = SAS_OPEN_REJECT; 1925 ts->open_rej_reason = SAS_OREJ_EPROTO; 1926 break; 1927 } 1928 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: 1929 { 1930 ts->stat = SAS_OPEN_REJECT; 1931 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 1932 break; 1933 } 1934 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: 1935 { 1936 ts->stat = SAS_OPEN_REJECT; 1937 ts->open_rej_reason = SAS_OREJ_BAD_DEST; 1938 break; 1939 } 1940 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: 1941 { 1942 ts->stat = SAS_OPEN_REJECT; 1943 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 1944 break; 1945 } 1946 case DMA_RX_UNEXP_NORM_RESP_ERR: 1947 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: 1948 case DMA_RX_RESP_BUF_OVERFLOW: 1949 { 1950 
ts->stat = SAS_OPEN_REJECT; 1951 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 1952 break; 1953 } 1954 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: 1955 { 1956 /* not sure */ 1957 ts->stat = SAS_DEV_NO_RESPONSE; 1958 break; 1959 } 1960 case DMA_RX_DATA_LEN_OVERFLOW: 1961 { 1962 ts->stat = SAS_DATA_OVERRUN; 1963 ts->residual = 0; 1964 break; 1965 } 1966 case DMA_RX_DATA_LEN_UNDERFLOW: 1967 { 1968 ts->residual = dma_rx_err_type; 1969 ts->stat = SAS_DATA_UNDERRUN; 1970 break; 1971 } 1972 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: 1973 case TRANS_TX_ERR_PHY_NOT_ENABLE: 1974 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: 1975 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: 1976 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: 1977 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: 1978 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: 1979 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: 1980 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: 1981 case TRANS_TX_ERR_WITH_BREAK_REQUEST: 1982 case TRANS_TX_ERR_WITH_BREAK_RECEVIED: 1983 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: 1984 case TRANS_TX_ERR_WITH_CLOSE_NORMAL: 1985 case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE: 1986 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: 1987 case TRANS_TX_ERR_WITH_CLOSE_COMINIT: 1988 case TRANS_TX_ERR_WITH_NAK_RECEVIED: 1989 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: 1990 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: 1991 case TRANS_TX_ERR_WITH_IPTT_CONFLICT: 1992 case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR: 1993 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: 1994 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: 1995 case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN: 1996 case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: 1997 case TRANS_RX_ERR_WITH_BREAK_REQUEST: 1998 case TRANS_RX_ERR_WITH_BREAK_RECEVIED: 1999 case TRANS_RX_ERR_WITH_CLOSE_NORMAL: 2000 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: 2001 case TRANS_RX_ERR_WITH_CLOSE_COMINIT: 2002 case TRANS_TX_ERR_FRAME_TXED: 2003 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: 2004 case TRANS_RX_ERR_WITH_DATA_LEN0: 2005 case TRANS_RX_ERR_WITH_BAD_HASH: 2006 case 
TRANS_RX_XRDY_WLEN_ZERO_ERR: 2007 case TRANS_RX_SSP_FRM_LEN_ERR: 2008 case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: 2009 case DMA_TX_DATA_SGL_OVERFLOW: 2010 case DMA_TX_UNEXP_XFER_ERR: 2011 case DMA_TX_UNEXP_RETRANS_ERR: 2012 case DMA_TX_XFER_LEN_OVERFLOW: 2013 case DMA_TX_XFER_OFFSET_ERR: 2014 case SIPC_RX_DATA_UNDERFLOW_ERR: 2015 case DMA_RX_DATA_SGL_OVERFLOW: 2016 case DMA_RX_DATA_OFFSET_ERR: 2017 case DMA_RX_RDSETUP_LEN_ODD_ERR: 2018 case DMA_RX_RDSETUP_LEN_ZERO_ERR: 2019 case DMA_RX_RDSETUP_LEN_OVER_ERR: 2020 case DMA_RX_SATA_FRAME_TYPE_ERR: 2021 case DMA_RX_UNKNOWN_FRM_ERR: 2022 { 2023 /* This will request a retry */ 2024 ts->stat = SAS_QUEUE_FULL; 2025 slot->abort = 1; 2026 break; 2027 } 2028 default: 2029 break; 2030 } 2031 } 2032 break; 2033 case SAS_PROTOCOL_SMP: 2034 ts->stat = SAM_STAT_CHECK_CONDITION; 2035 break; 2036 2037 case SAS_PROTOCOL_SATA: 2038 case SAS_PROTOCOL_STP: 2039 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 2040 { 2041 switch (error) { 2042 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: 2043 { 2044 ts->stat = SAS_OPEN_REJECT; 2045 ts->open_rej_reason = SAS_OREJ_NO_DEST; 2046 break; 2047 } 2048 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: 2049 { 2050 ts->resp = SAS_TASK_UNDELIVERED; 2051 ts->stat = SAS_DEV_NO_RESPONSE; 2052 break; 2053 } 2054 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: 2055 { 2056 ts->stat = SAS_OPEN_REJECT; 2057 ts->open_rej_reason = SAS_OREJ_EPROTO; 2058 break; 2059 } 2060 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: 2061 { 2062 ts->stat = SAS_OPEN_REJECT; 2063 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 2064 break; 2065 } 2066 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: 2067 { 2068 ts->stat = SAS_OPEN_REJECT; 2069 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 2070 break; 2071 } 2072 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: 2073 { 2074 ts->stat = SAS_OPEN_REJECT; 2075 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 2076 break; 2077 } 2078 case DMA_RX_RESP_BUF_OVERFLOW: 2079 case DMA_RX_UNEXP_NORM_RESP_ERR: 2080 case 
TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: 2081 { 2082 ts->stat = SAS_OPEN_REJECT; 2083 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 2084 break; 2085 } 2086 case DMA_RX_DATA_LEN_OVERFLOW: 2087 { 2088 ts->stat = SAS_DATA_OVERRUN; 2089 ts->residual = 0; 2090 break; 2091 } 2092 case DMA_RX_DATA_LEN_UNDERFLOW: 2093 { 2094 ts->residual = dma_rx_err_type; 2095 ts->stat = SAS_DATA_UNDERRUN; 2096 break; 2097 } 2098 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: 2099 case TRANS_TX_ERR_PHY_NOT_ENABLE: 2100 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: 2101 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: 2102 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: 2103 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: 2104 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: 2105 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: 2106 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: 2107 case TRANS_TX_ERR_WITH_BREAK_REQUEST: 2108 case TRANS_TX_ERR_WITH_BREAK_RECEVIED: 2109 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: 2110 case TRANS_TX_ERR_WITH_CLOSE_NORMAL: 2111 case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE: 2112 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: 2113 case TRANS_TX_ERR_WITH_CLOSE_COMINIT: 2114 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: 2115 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: 2116 case TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS: 2117 case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT: 2118 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: 2119 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: 2120 case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR: 2121 case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR: 2122 case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN: 2123 case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP: 2124 case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN: 2125 case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: 2126 case TRANS_RX_ERR_WITH_BREAK_REQUEST: 2127 case TRANS_RX_ERR_WITH_BREAK_RECEVIED: 2128 case TRANS_RX_ERR_WITH_CLOSE_NORMAL: 2129 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: 2130 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: 2131 case TRANS_RX_ERR_WITH_CLOSE_COMINIT: 2132 case TRANS_RX_ERR_WITH_DATA_LEN0: 2133 case 
TRANS_RX_ERR_WITH_BAD_HASH: 2134 case TRANS_RX_XRDY_WLEN_ZERO_ERR: 2135 case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: 2136 case DMA_TX_DATA_SGL_OVERFLOW: 2137 case DMA_TX_UNEXP_XFER_ERR: 2138 case DMA_TX_UNEXP_RETRANS_ERR: 2139 case DMA_TX_XFER_LEN_OVERFLOW: 2140 case DMA_TX_XFER_OFFSET_ERR: 2141 case SIPC_RX_FIS_STATUS_ERR_BIT_VLD: 2142 case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR: 2143 case SIPC_RX_FIS_STATUS_BSY_BIT_ERR: 2144 case SIPC_RX_WRSETUP_LEN_ODD_ERR: 2145 case SIPC_RX_WRSETUP_LEN_ZERO_ERR: 2146 case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR: 2147 case SIPC_RX_SATA_UNEXP_FIS_ERR: 2148 case DMA_RX_DATA_SGL_OVERFLOW: 2149 case DMA_RX_DATA_OFFSET_ERR: 2150 case DMA_RX_SATA_FRAME_TYPE_ERR: 2151 case DMA_RX_UNEXP_RDFRAME_ERR: 2152 case DMA_RX_PIO_DATA_LEN_ERR: 2153 case DMA_RX_RDSETUP_STATUS_ERR: 2154 case DMA_RX_RDSETUP_STATUS_DRQ_ERR: 2155 case DMA_RX_RDSETUP_STATUS_BSY_ERR: 2156 case DMA_RX_RDSETUP_LEN_ODD_ERR: 2157 case DMA_RX_RDSETUP_LEN_ZERO_ERR: 2158 case DMA_RX_RDSETUP_LEN_OVER_ERR: 2159 case DMA_RX_RDSETUP_OFFSET_ERR: 2160 case DMA_RX_RDSETUP_ACTIVE_ERR: 2161 case DMA_RX_RDSETUP_ESTATUS_ERR: 2162 case DMA_RX_UNKNOWN_FRM_ERR: 2163 case TRANS_RX_SSP_FRM_LEN_ERR: 2164 case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY: 2165 { 2166 slot->abort = 1; 2167 ts->stat = SAS_PHY_DOWN; 2168 break; 2169 } 2170 default: 2171 { 2172 ts->stat = SAS_PROTO_RESPONSE; 2173 break; 2174 } 2175 } 2176 hisi_sas_sata_done(task, slot); 2177 } 2178 break; 2179 default: 2180 break; 2181 } 2182 } 2183 2184 static int 2185 slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) 2186 { 2187 struct sas_task *task = slot->task; 2188 struct hisi_sas_device *sas_dev; 2189 struct device *dev = hisi_hba->dev; 2190 struct task_status_struct *ts; 2191 struct domain_device *device; 2192 enum exec_status sts; 2193 struct hisi_sas_complete_v2_hdr *complete_queue = 2194 hisi_hba->complete_hdr[slot->cmplt_queue]; 2195 struct hisi_sas_complete_v2_hdr *complete_hdr = 2196 
&complete_queue[slot->cmplt_queue_slot]; 2197 unsigned long flags; 2198 int aborted; 2199 2200 if (unlikely(!task || !task->lldd_task || !task->dev)) 2201 return -EINVAL; 2202 2203 ts = &task->task_status; 2204 device = task->dev; 2205 sas_dev = device->lldd_dev; 2206 2207 spin_lock_irqsave(&task->task_state_lock, flags); 2208 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; 2209 task->task_state_flags &= 2210 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 2211 spin_unlock_irqrestore(&task->task_state_lock, flags); 2212 2213 memset(ts, 0, sizeof(*ts)); 2214 ts->resp = SAS_TASK_COMPLETE; 2215 2216 if (unlikely(aborted)) { 2217 ts->stat = SAS_ABORTED_TASK; 2218 hisi_sas_slot_task_free(hisi_hba, task, slot); 2219 return -1; 2220 } 2221 2222 if (unlikely(!sas_dev)) { 2223 dev_dbg(dev, "slot complete: port has no device\n"); 2224 ts->stat = SAS_PHY_DOWN; 2225 goto out; 2226 } 2227 2228 /* Use SAS+TMF status codes */ 2229 switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK) 2230 >> CMPLT_HDR_ABORT_STAT_OFF) { 2231 case STAT_IO_ABORTED: 2232 /* this io has been aborted by abort command */ 2233 ts->stat = SAS_ABORTED_TASK; 2234 goto out; 2235 case STAT_IO_COMPLETE: 2236 /* internal abort command complete */ 2237 ts->stat = TMF_RESP_FUNC_SUCC; 2238 del_timer(&slot->internal_abort_timer); 2239 goto out; 2240 case STAT_IO_NO_DEVICE: 2241 ts->stat = TMF_RESP_FUNC_COMPLETE; 2242 del_timer(&slot->internal_abort_timer); 2243 goto out; 2244 case STAT_IO_NOT_VALID: 2245 /* abort single io, controller don't find 2246 * the io need to abort 2247 */ 2248 ts->stat = TMF_RESP_FUNC_FAILED; 2249 del_timer(&slot->internal_abort_timer); 2250 goto out; 2251 default: 2252 break; 2253 } 2254 2255 if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) && 2256 (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { 2257 u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK) 2258 >> CMPLT_HDR_ERR_PHASE_OFF; 2259 2260 /* Analyse error happens on which phase TX or RX */ 2261 if 
(ERR_ON_TX_PHASE(err_phase)) 2262 slot_err_v2_hw(hisi_hba, task, slot, 1); 2263 else if (ERR_ON_RX_PHASE(err_phase)) 2264 slot_err_v2_hw(hisi_hba, task, slot, 2); 2265 2266 if (unlikely(slot->abort)) 2267 return ts->stat; 2268 goto out; 2269 } 2270 2271 switch (task->task_proto) { 2272 case SAS_PROTOCOL_SSP: 2273 { 2274 struct hisi_sas_status_buffer *status_buffer = 2275 hisi_sas_status_buf_addr_mem(slot); 2276 struct ssp_response_iu *iu = (struct ssp_response_iu *) 2277 &status_buffer->iu[0]; 2278 2279 sas_ssp_task_response(dev, task, iu); 2280 break; 2281 } 2282 case SAS_PROTOCOL_SMP: 2283 { 2284 struct scatterlist *sg_resp = &task->smp_task.smp_resp; 2285 void *to; 2286 2287 ts->stat = SAM_STAT_GOOD; 2288 to = kmap_atomic(sg_page(sg_resp)); 2289 2290 dma_unmap_sg(dev, &task->smp_task.smp_resp, 1, 2291 DMA_FROM_DEVICE); 2292 dma_unmap_sg(dev, &task->smp_task.smp_req, 1, 2293 DMA_TO_DEVICE); 2294 memcpy(to + sg_resp->offset, 2295 hisi_sas_status_buf_addr_mem(slot) + 2296 sizeof(struct hisi_sas_err_record), 2297 sg_dma_len(sg_resp)); 2298 kunmap_atomic(to); 2299 break; 2300 } 2301 case SAS_PROTOCOL_SATA: 2302 case SAS_PROTOCOL_STP: 2303 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 2304 { 2305 ts->stat = SAM_STAT_GOOD; 2306 hisi_sas_sata_done(task, slot); 2307 break; 2308 } 2309 default: 2310 ts->stat = SAM_STAT_CHECK_CONDITION; 2311 break; 2312 } 2313 2314 if (!slot->port->port_attached) { 2315 dev_err(dev, "slot complete: port %d has removed\n", 2316 slot->port->sas_port.id); 2317 ts->stat = SAS_PHY_DOWN; 2318 } 2319 2320 out: 2321 spin_lock_irqsave(&task->task_state_lock, flags); 2322 task->task_state_flags |= SAS_TASK_STATE_DONE; 2323 spin_unlock_irqrestore(&task->task_state_lock, flags); 2324 spin_lock_irqsave(&hisi_hba->lock, flags); 2325 hisi_sas_slot_task_free(hisi_hba, task, slot); 2326 spin_unlock_irqrestore(&hisi_hba->lock, flags); 2327 sts = ts->stat; 2328 2329 if (task->task_done) 2330 task->task_done(task); 2331 2332 return sts; 2333 } 2334 2335 static 
int prep_ata_v2_hw(struct hisi_hba *hisi_hba, 2336 struct hisi_sas_slot *slot) 2337 { 2338 struct sas_task *task = slot->task; 2339 struct domain_device *device = task->dev; 2340 struct domain_device *parent_dev = device->parent; 2341 struct hisi_sas_device *sas_dev = device->lldd_dev; 2342 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 2343 struct asd_sas_port *sas_port = device->port; 2344 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 2345 u8 *buf_cmd; 2346 int has_data = 0, rc = 0, hdr_tag = 0; 2347 u32 dw1 = 0, dw2 = 0; 2348 2349 /* create header */ 2350 /* dw0 */ 2351 hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); 2352 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) 2353 hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); 2354 else 2355 hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF); 2356 2357 /* dw1 */ 2358 switch (task->data_dir) { 2359 case DMA_TO_DEVICE: 2360 has_data = 1; 2361 dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; 2362 break; 2363 case DMA_FROM_DEVICE: 2364 has_data = 1; 2365 dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; 2366 break; 2367 default: 2368 dw1 &= ~CMD_HDR_DIR_MSK; 2369 } 2370 2371 if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) && 2372 (task->ata_task.fis.control & ATA_SRST)) 2373 dw1 |= 1 << CMD_HDR_RESET_OFF; 2374 2375 dw1 |= (hisi_sas_get_ata_protocol( 2376 task->ata_task.fis.command, task->data_dir)) 2377 << CMD_HDR_FRAME_TYPE_OFF; 2378 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; 2379 hdr->dw1 = cpu_to_le32(dw1); 2380 2381 /* dw2 */ 2382 if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) { 2383 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); 2384 dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; 2385 } 2386 2387 dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | 2388 2 << CMD_HDR_SG_MOD_OFF; 2389 hdr->dw2 = cpu_to_le32(dw2); 2390 2391 /* dw3 */ 2392 hdr->transfer_tags = cpu_to_le32(slot->idx); 2393 2394 if (has_data) { 2395 rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, 2396 
slot->n_elem); 2397 if (rc) 2398 return rc; 2399 } 2400 2401 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); 2402 hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); 2403 hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); 2404 2405 buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot); 2406 2407 if (likely(!task->ata_task.device_control_reg_update)) 2408 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ 2409 /* fill in command FIS */ 2410 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 2411 2412 return 0; 2413 } 2414 2415 static void hisi_sas_internal_abort_quirk_timeout(unsigned long data) 2416 { 2417 struct hisi_sas_slot *slot = (struct hisi_sas_slot *)data; 2418 struct hisi_sas_port *port = slot->port; 2419 struct asd_sas_port *asd_sas_port; 2420 struct asd_sas_phy *sas_phy; 2421 2422 if (!port) 2423 return; 2424 2425 asd_sas_port = &port->sas_port; 2426 2427 /* Kick the hardware - send break command */ 2428 list_for_each_entry(sas_phy, &asd_sas_port->phy_list, port_phy_el) { 2429 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 2430 struct hisi_hba *hisi_hba = phy->hisi_hba; 2431 int phy_no = sas_phy->id; 2432 u32 link_dfx2; 2433 2434 link_dfx2 = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2); 2435 if ((link_dfx2 == LINK_DFX2_RCVR_HOLD_STS_MSK) || 2436 (link_dfx2 & LINK_DFX2_SEND_HOLD_STS_MSK)) { 2437 u32 txid_auto; 2438 2439 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, 2440 TXID_AUTO); 2441 txid_auto |= TXID_AUTO_CTB_MSK; 2442 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, 2443 txid_auto); 2444 return; 2445 } 2446 } 2447 } 2448 2449 static int prep_abort_v2_hw(struct hisi_hba *hisi_hba, 2450 struct hisi_sas_slot *slot, 2451 int device_id, int abort_flag, int tag_to_abort) 2452 { 2453 struct sas_task *task = slot->task; 2454 struct domain_device *dev = task->dev; 2455 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 2456 struct hisi_sas_port *port = slot->port; 2457 struct timer_list 
*timer = &slot->internal_abort_timer; 2458 2459 /* setup the quirk timer */ 2460 setup_timer(timer, hisi_sas_internal_abort_quirk_timeout, 2461 (unsigned long)slot); 2462 /* Set the timeout to 10ms less than internal abort timeout */ 2463 mod_timer(timer, jiffies + msecs_to_jiffies(100)); 2464 2465 /* dw0 */ 2466 hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/ 2467 (port->id << CMD_HDR_PORT_OFF) | 2468 ((dev_is_sata(dev) ? 1:0) << 2469 CMD_HDR_ABORT_DEVICE_TYPE_OFF) | 2470 (abort_flag << CMD_HDR_ABORT_FLAG_OFF)); 2471 2472 /* dw1 */ 2473 hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF); 2474 2475 /* dw7 */ 2476 hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF); 2477 hdr->transfer_tags = cpu_to_le32(slot->idx); 2478 2479 return 0; 2480 } 2481 2482 static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) 2483 { 2484 int i, res = IRQ_HANDLED; 2485 u32 port_id, link_rate, hard_phy_linkrate; 2486 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2487 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2488 struct device *dev = hisi_hba->dev; 2489 u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; 2490 struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; 2491 2492 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); 2493 2494 if (is_sata_phy_v2_hw(hisi_hba, phy_no)) 2495 goto end; 2496 2497 if (phy_no == 8) { 2498 u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); 2499 2500 port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> 2501 PORT_STATE_PHY8_PORT_NUM_OFF; 2502 link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >> 2503 PORT_STATE_PHY8_CONN_RATE_OFF; 2504 } else { 2505 port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); 2506 port_id = (port_id >> (4 * phy_no)) & 0xf; 2507 link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); 2508 link_rate = (link_rate >> (phy_no * 4)) & 0xf; 2509 } 2510 2511 if (port_id == 0xf) { 2512 dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); 2513 res = IRQ_NONE; 
2514 goto end; 2515 } 2516 2517 for (i = 0; i < 6; i++) { 2518 u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, 2519 RX_IDAF_DWORD0 + (i * 4)); 2520 frame_rcvd[i] = __swab32(idaf); 2521 } 2522 2523 sas_phy->linkrate = link_rate; 2524 hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, 2525 HARD_PHY_LINKRATE); 2526 phy->maximum_linkrate = hard_phy_linkrate & 0xf; 2527 phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; 2528 2529 sas_phy->oob_mode = SAS_OOB_MODE; 2530 memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE); 2531 dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); 2532 phy->port_id = port_id; 2533 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 2534 phy->phy_type |= PORT_TYPE_SAS; 2535 phy->phy_attached = 1; 2536 phy->identify.device_type = id->dev_type; 2537 phy->frame_rcvd_size = sizeof(struct sas_identify_frame); 2538 if (phy->identify.device_type == SAS_END_DEVICE) 2539 phy->identify.target_port_protocols = 2540 SAS_PROTOCOL_SSP; 2541 else if (phy->identify.device_type != SAS_PHY_UNUSED) { 2542 phy->identify.target_port_protocols = 2543 SAS_PROTOCOL_SMP; 2544 if (!timer_pending(&hisi_hba->timer)) 2545 set_link_timer_quirk(hisi_hba); 2546 } 2547 queue_work(hisi_hba->wq, &phy->phyup_ws); 2548 2549 end: 2550 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 2551 CHL_INT0_SL_PHY_ENABLE_MSK); 2552 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); 2553 2554 return res; 2555 } 2556 2557 static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba) 2558 { 2559 u32 port_state; 2560 2561 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); 2562 if (port_state & 0x1ff) 2563 return true; 2564 2565 return false; 2566 } 2567 2568 static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) 2569 { 2570 u32 phy_state, sl_ctrl, txid_auto; 2571 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2572 struct hisi_sas_port *port = phy->port; 2573 2574 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); 
2575 2576 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); 2577 hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0); 2578 2579 sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); 2580 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, 2581 sl_ctrl & ~SL_CONTROL_CTA_MSK); 2582 if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id)) 2583 if (!check_any_wideports_v2_hw(hisi_hba) && 2584 timer_pending(&hisi_hba->timer)) 2585 del_timer(&hisi_hba->timer); 2586 2587 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); 2588 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, 2589 txid_auto | TXID_AUTO_CT3_MSK); 2590 2591 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); 2592 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); 2593 2594 return IRQ_HANDLED; 2595 } 2596 2597 static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) 2598 { 2599 struct hisi_hba *hisi_hba = p; 2600 u32 irq_msk; 2601 int phy_no = 0; 2602 2603 irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) 2604 >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff; 2605 while (irq_msk) { 2606 if (irq_msk & 1) { 2607 u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, 2608 CHL_INT0); 2609 2610 switch (reg_value & (CHL_INT0_NOT_RDY_MSK | 2611 CHL_INT0_SL_PHY_ENABLE_MSK)) { 2612 2613 case CHL_INT0_SL_PHY_ENABLE_MSK: 2614 /* phy up */ 2615 if (phy_up_v2_hw(phy_no, hisi_hba) == 2616 IRQ_NONE) 2617 return IRQ_NONE; 2618 break; 2619 2620 case CHL_INT0_NOT_RDY_MSK: 2621 /* phy down */ 2622 if (phy_down_v2_hw(phy_no, hisi_hba) == 2623 IRQ_NONE) 2624 return IRQ_NONE; 2625 break; 2626 2627 case (CHL_INT0_NOT_RDY_MSK | 2628 CHL_INT0_SL_PHY_ENABLE_MSK): 2629 reg_value = hisi_sas_read32(hisi_hba, 2630 PHY_STATE); 2631 if (reg_value & BIT(phy_no)) { 2632 /* phy up */ 2633 if (phy_up_v2_hw(phy_no, hisi_hba) == 2634 IRQ_NONE) 2635 return IRQ_NONE; 2636 } else { 2637 /* phy down */ 2638 if (phy_down_v2_hw(phy_no, hisi_hba) == 2639 IRQ_NONE) 2640 return 
IRQ_NONE; 2641 } 2642 break; 2643 2644 default: 2645 break; 2646 } 2647 2648 } 2649 irq_msk >>= 1; 2650 phy_no++; 2651 } 2652 2653 return IRQ_HANDLED; 2654 } 2655 2656 static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) 2657 { 2658 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2659 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2660 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 2661 u32 bcast_status; 2662 2663 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); 2664 bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); 2665 if (bcast_status & RX_BCAST_CHG_MSK) 2666 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); 2667 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 2668 CHL_INT0_SL_RX_BCST_ACK_MSK); 2669 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); 2670 } 2671 2672 static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) 2673 { 2674 struct hisi_hba *hisi_hba = p; 2675 struct device *dev = hisi_hba->dev; 2676 u32 ent_msk, ent_tmp, irq_msk; 2677 int phy_no = 0; 2678 2679 ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); 2680 ent_tmp = ent_msk; 2681 ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK; 2682 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk); 2683 2684 irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >> 2685 HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff; 2686 2687 while (irq_msk) { 2688 if (irq_msk & (1 << phy_no)) { 2689 u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, 2690 CHL_INT0); 2691 u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no, 2692 CHL_INT1); 2693 u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, 2694 CHL_INT2); 2695 2696 if (irq_value1) { 2697 if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK | 2698 CHL_INT1_DMAC_TX_ECC_ERR_MSK)) 2699 panic("%s: DMAC RX/TX ecc bad error!\ 2700 (0x%x)", 2701 dev_name(dev), irq_value1); 2702 2703 hisi_sas_phy_write32(hisi_hba, phy_no, 2704 CHL_INT1, irq_value1); 2705 } 2706 2707 if (irq_value2) 2708 
hisi_sas_phy_write32(hisi_hba, phy_no, 2709 CHL_INT2, irq_value2); 2710 2711 2712 if (irq_value0) { 2713 if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK) 2714 phy_bcast_v2_hw(phy_no, hisi_hba); 2715 2716 hisi_sas_phy_write32(hisi_hba, phy_no, 2717 CHL_INT0, irq_value0 2718 & (~CHL_INT0_HOTPLUG_TOUT_MSK) 2719 & (~CHL_INT0_SL_PHY_ENABLE_MSK) 2720 & (~CHL_INT0_NOT_RDY_MSK)); 2721 } 2722 } 2723 irq_msk &= ~(1 << phy_no); 2724 phy_no++; 2725 } 2726 2727 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp); 2728 2729 return IRQ_HANDLED; 2730 } 2731 2732 static void 2733 one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value) 2734 { 2735 struct device *dev = hisi_hba->dev; 2736 u32 reg_val; 2737 2738 if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) { 2739 reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR); 2740 dev_warn(dev, "hgc_dqe_acc1b_intr found: \ 2741 Ram address is 0x%08X\n", 2742 (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >> 2743 HGC_DQE_ECC_1B_ADDR_OFF); 2744 } 2745 2746 if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) { 2747 reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR); 2748 dev_warn(dev, "hgc_iost_acc1b_intr found: \ 2749 Ram address is 0x%08X\n", 2750 (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >> 2751 HGC_IOST_ECC_1B_ADDR_OFF); 2752 } 2753 2754 if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) { 2755 reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR); 2756 dev_warn(dev, "hgc_itct_acc1b_intr found: \ 2757 Ram address is 0x%08X\n", 2758 (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >> 2759 HGC_ITCT_ECC_1B_ADDR_OFF); 2760 } 2761 2762 if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) { 2763 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); 2764 dev_warn(dev, "hgc_iostl_acc1b_intr found: \ 2765 memory address is 0x%08X\n", 2766 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >> 2767 HGC_LM_DFX_STATUS2_IOSTLIST_OFF); 2768 } 2769 2770 if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) { 2771 reg_val = hisi_sas_read32(hisi_hba, 
HGC_LM_DFX_STATUS2); 2772 dev_warn(dev, "hgc_itctl_acc1b_intr found: \ 2773 memory address is 0x%08X\n", 2774 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >> 2775 HGC_LM_DFX_STATUS2_ITCTLIST_OFF); 2776 } 2777 2778 if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) { 2779 reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR); 2780 dev_warn(dev, "hgc_cqe_acc1b_intr found: \ 2781 Ram address is 0x%08X\n", 2782 (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >> 2783 HGC_CQE_ECC_1B_ADDR_OFF); 2784 } 2785 2786 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) { 2787 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2788 dev_warn(dev, "rxm_mem0_acc1b_intr found: \ 2789 memory address is 0x%08X\n", 2790 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >> 2791 HGC_RXM_DFX_STATUS14_MEM0_OFF); 2792 } 2793 2794 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) { 2795 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2796 dev_warn(dev, "rxm_mem1_acc1b_intr found: \ 2797 memory address is 0x%08X\n", 2798 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >> 2799 HGC_RXM_DFX_STATUS14_MEM1_OFF); 2800 } 2801 2802 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) { 2803 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2804 dev_warn(dev, "rxm_mem2_acc1b_intr found: \ 2805 memory address is 0x%08X\n", 2806 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >> 2807 HGC_RXM_DFX_STATUS14_MEM2_OFF); 2808 } 2809 2810 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) { 2811 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15); 2812 dev_warn(dev, "rxm_mem3_acc1b_intr found: \ 2813 memory address is 0x%08X\n", 2814 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >> 2815 HGC_RXM_DFX_STATUS15_MEM3_OFF); 2816 } 2817 2818 } 2819 2820 static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, 2821 u32 irq_value) 2822 { 2823 u32 reg_val; 2824 struct device *dev = hisi_hba->dev; 2825 2826 if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) { 2827 reg_val = 
hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR); 2828 dev_warn(dev, "hgc_dqe_accbad_intr (0x%x) found: \ 2829 Ram address is 0x%08X\n", 2830 irq_value, 2831 (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >> 2832 HGC_DQE_ECC_MB_ADDR_OFF); 2833 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2834 } 2835 2836 if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) { 2837 reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR); 2838 dev_warn(dev, "hgc_iost_accbad_intr (0x%x) found: \ 2839 Ram address is 0x%08X\n", 2840 irq_value, 2841 (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >> 2842 HGC_IOST_ECC_MB_ADDR_OFF); 2843 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2844 } 2845 2846 if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) { 2847 reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR); 2848 dev_warn(dev,"hgc_itct_accbad_intr (0x%x) found: \ 2849 Ram address is 0x%08X\n", 2850 irq_value, 2851 (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >> 2852 HGC_ITCT_ECC_MB_ADDR_OFF); 2853 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2854 } 2855 2856 if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) { 2857 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); 2858 dev_warn(dev, "hgc_iostl_accbad_intr (0x%x) found: \ 2859 memory address is 0x%08X\n", 2860 irq_value, 2861 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >> 2862 HGC_LM_DFX_STATUS2_IOSTLIST_OFF); 2863 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2864 } 2865 2866 if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) { 2867 reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); 2868 dev_warn(dev, "hgc_itctl_accbad_intr (0x%x) found: \ 2869 memory address is 0x%08X\n", 2870 irq_value, 2871 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >> 2872 HGC_LM_DFX_STATUS2_ITCTLIST_OFF); 2873 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2874 } 2875 2876 if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) { 2877 reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR); 2878 dev_warn(dev, "hgc_cqe_accbad_intr (0x%x) found: \ 2879 Ram address is 0x%08X\n", 2880 
irq_value, 2881 (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >> 2882 HGC_CQE_ECC_MB_ADDR_OFF); 2883 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2884 } 2885 2886 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) { 2887 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2888 dev_warn(dev, "rxm_mem0_accbad_intr (0x%x) found: \ 2889 memory address is 0x%08X\n", 2890 irq_value, 2891 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >> 2892 HGC_RXM_DFX_STATUS14_MEM0_OFF); 2893 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2894 } 2895 2896 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) { 2897 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2898 dev_warn(dev, "rxm_mem1_accbad_intr (0x%x) found: \ 2899 memory address is 0x%08X\n", 2900 irq_value, 2901 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >> 2902 HGC_RXM_DFX_STATUS14_MEM1_OFF); 2903 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2904 } 2905 2906 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) { 2907 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); 2908 dev_warn(dev, "rxm_mem2_accbad_intr (0x%x) found: \ 2909 memory address is 0x%08X\n", 2910 irq_value, 2911 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >> 2912 HGC_RXM_DFX_STATUS14_MEM2_OFF); 2913 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2914 } 2915 2916 if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) { 2917 reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15); 2918 dev_warn(dev, "rxm_mem3_accbad_intr (0x%x) found: \ 2919 memory address is 0x%08X\n", 2920 irq_value, 2921 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >> 2922 HGC_RXM_DFX_STATUS15_MEM3_OFF); 2923 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2924 } 2925 2926 return; 2927 } 2928 2929 static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p) 2930 { 2931 struct hisi_hba *hisi_hba = p; 2932 u32 irq_value, irq_msk; 2933 2934 irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); 2935 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff); 2936 2937 
irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); 2938 if (irq_value) { 2939 one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); 2940 multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); 2941 } 2942 2943 hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value); 2944 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk); 2945 2946 return IRQ_HANDLED; 2947 } 2948 2949 #define AXI_ERR_NR 8 2950 static const char axi_err_info[AXI_ERR_NR][32] = { 2951 "IOST_AXI_W_ERR", 2952 "IOST_AXI_R_ERR", 2953 "ITCT_AXI_W_ERR", 2954 "ITCT_AXI_R_ERR", 2955 "SATA_AXI_W_ERR", 2956 "SATA_AXI_R_ERR", 2957 "DQE_AXI_R_ERR", 2958 "CQE_AXI_W_ERR" 2959 }; 2960 2961 #define FIFO_ERR_NR 5 2962 static const char fifo_err_info[FIFO_ERR_NR][32] = { 2963 "CQE_WINFO_FIFO", 2964 "CQE_MSG_FIFIO", 2965 "GETDQE_FIFO", 2966 "CMDP_FIFO", 2967 "AWTCTRL_FIFO" 2968 }; 2969 2970 static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) 2971 { 2972 struct hisi_hba *hisi_hba = p; 2973 u32 irq_value, irq_msk, err_value; 2974 struct device *dev = hisi_hba->dev; 2975 2976 irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); 2977 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe); 2978 2979 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 2980 if (irq_value) { 2981 if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) { 2982 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 2983 1 << ENT_INT_SRC3_WP_DEPTH_OFF); 2984 dev_warn(dev, "write pointer and depth error (0x%x) \ 2985 found!\n", 2986 irq_value); 2987 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2988 } 2989 2990 if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) { 2991 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 2992 1 << 2993 ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF); 2994 dev_warn(dev, "iptt no match slot error (0x%x) found!\n", 2995 irq_value); 2996 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2997 } 2998 2999 if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) { 3000 dev_warn(dev, "read pointer and depth error (0x%x) \ 3001 found!\n", 3002 
irq_value); 3003 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3004 } 3005 3006 if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) { 3007 int i; 3008 3009 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 3010 1 << ENT_INT_SRC3_AXI_OFF); 3011 err_value = hisi_sas_read32(hisi_hba, 3012 HGC_AXI_FIFO_ERR_INFO); 3013 3014 for (i = 0; i < AXI_ERR_NR; i++) { 3015 if (err_value & BIT(i)) { 3016 dev_warn(dev, "%s (0x%x) found!\n", 3017 axi_err_info[i], irq_value); 3018 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3019 } 3020 } 3021 } 3022 3023 if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) { 3024 int i; 3025 3026 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 3027 1 << ENT_INT_SRC3_FIFO_OFF); 3028 err_value = hisi_sas_read32(hisi_hba, 3029 HGC_AXI_FIFO_ERR_INFO); 3030 3031 for (i = 0; i < FIFO_ERR_NR; i++) { 3032 if (err_value & BIT(AXI_ERR_NR + i)) { 3033 dev_warn(dev, "%s (0x%x) found!\n", 3034 fifo_err_info[i], irq_value); 3035 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3036 } 3037 } 3038 3039 } 3040 3041 if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) { 3042 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 3043 1 << ENT_INT_SRC3_LM_OFF); 3044 dev_warn(dev, "LM add/fetch list error (0x%x) found!\n", 3045 irq_value); 3046 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3047 } 3048 3049 if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) { 3050 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 3051 1 << ENT_INT_SRC3_ABT_OFF); 3052 dev_warn(dev, "SAS_HGC_ABT fetch LM list error (0x%x) found!\n", 3053 irq_value); 3054 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3055 } 3056 } 3057 3058 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk); 3059 3060 return IRQ_HANDLED; 3061 } 3062 3063 static void cq_tasklet_v2_hw(unsigned long val) 3064 { 3065 struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val; 3066 struct hisi_hba *hisi_hba = cq->hisi_hba; 3067 struct hisi_sas_slot *slot; 3068 struct hisi_sas_itct *itct; 3069 struct hisi_sas_complete_v2_hdr *complete_queue; 3070 u32 rd_point = cq->rd_point, wr_point, dev_id; 3071 int 
	    queue = cq->id;
	struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

	/* Re-admit previously rejected STP links if any are pending */
	if (unlikely(hisi_hba->reject_stp_links_msk))
		phys_try_accept_stp_links_v2_hw(hisi_hba);

	complete_queue = hisi_hba->complete_hdr[queue];

	spin_lock(&dq->lock);
	/* CQ pointer registers are laid out at a 0x14-byte stride per queue */
	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v2_hdr *complete_hdr;
		int iptt;

		complete_hdr = &complete_queue[rd_point];

		/* Check for NCQ completion */
		if (complete_hdr->act) {
			u32 act_tmp = complete_hdr->act;
			int ncq_tag_count = ffs(act_tmp);

			dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
				 CMPLT_HDR_DEV_ID_OFF;
			itct = &hisi_hba->itct[dev_id];

			/* The NCQ tags are held in the itct header */
			while (ncq_tag_count) {
				__le64 *ncq_tag = &itct->qw4_15[0];

				/* ffs() is 1-based; convert to a bit index */
				ncq_tag_count -= 1;
				/* 12-bit IPTTs packed five per 64-bit word */
				iptt = (ncq_tag[ncq_tag_count / 5]
					>> (ncq_tag_count % 5) * 12) & 0xfff;

				slot = &hisi_hba->slot_info[iptt];
				slot->cmplt_queue_slot = rd_point;
				slot->cmplt_queue = queue;
				slot_complete_v2_hw(hisi_hba, slot);

				/* Clear the handled tag; advance to next */
				act_tmp &= ~(1 << ncq_tag_count);
				ncq_tag_count = ffs(act_tmp);
			}
		} else {
			/* Non-NCQ: IPTT comes straight from the entry */
			iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v2_hw(hisi_hba, slot);
		}

		/* Ring buffer: wrap the read pointer at the queue depth */
		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
	spin_unlock(&dq->lock);
}

/*
 * Completion-queue hard IRQ: acknowledge the source and defer the actual
 * queue processing to the per-CQ tasklet.
 */
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	int queue = cq->id;

	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);

	tasklet_schedule(&cq->tasklet);

	return IRQ_HANDLED;
}

/*
 * SATA D2H FIS interrupt: a directly attached SATA device has delivered
 * its initial D2H FIS (link-up path). Validate the FIS, determine the
 * port id and link rate, fabricate a SAS address for the device and kick
 * off the generic phy-up work.
 */
static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
{
	struct hisi_sas_phy *phy = p;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_initial_fis *initial_fis;
	struct dev_to_host_fis *fis;
	u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
	irqreturn_t res = IRQ_HANDLED;
	u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
	int phy_no, offset;

	phy_no = sas_phy->id;
	initial_fis = &hisi_hba->initial_fis[phy_no];
	fis = &initial_fis->fis;

	/*
	 * ENT_INT_SRC[1..3] / their masks are 4 bytes apart, each covering
	 * four phys (one byte per phy); mask this phy's D2H source first.
	 */
	offset = 4 * (phy_no / 4);
	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset,
			 ent_msk | 1 << ((phy_no % 4) * 8));

	ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset);
	/* Remember the raw D2H bit so only it is acknowledged at 'end' */
	ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF *
			       (phy_no % 4)));
	ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4);
	if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) {
		dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}

	/* check ERR bit of Status Register */
	if (fis->status & ATA_ERR) {
		dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
			 fis->status);
		/* Bounce the phy so the OOB/FIS exchange is retried */
		disable_phy_v2_hw(hisi_hba, phy_no);
		enable_phy_v2_hw(hisi_hba, phy_no);
		res = IRQ_NONE;
		goto end;
	}

	/* Phy 8 reports its port number and rate in PORT_STATE instead */
	if (unlikely(phy_no == 8)) {
		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);

		port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
			  PORT_STATE_PHY8_PORT_NUM_OFF;
		link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
			    PORT_STATE_PHY8_CONN_RATE_OFF;
	} else {
		/* Phys 0-7: one nibble per phy in each register */
		port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
		port_id = (port_id >> (4 * phy_no)) & 0xf;
		link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
		link_rate = (link_rate >> (phy_no * 4)) & 0xf;
	}

	/* 0xf means the phy is not assigned to any port */
	if (port_id == 0xf) {
		dev_err(dev, "sata int: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}

	sas_phy->linkrate = link_rate;
	hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
						HARD_PHY_LINKRATE);
	phy->maximum_linkrate = hard_phy_linkrate & 0xf;
	phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;

	sas_phy->oob_mode = SATA_OOB_MODE;
	/* Make up some unique SAS address */
	attached_sas_addr[0] = 0x50;
	attached_sas_addr[7] = phy_no;
	memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
	memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
	dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate);
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	phy->port_id = port_id;
	phy->phy_type |= PORT_TYPE_SATA;
	phy->phy_attached = 1;
	phy->identify.device_type = SAS_SATA_DEV;
	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	/* Hand off to the common phy-up worker */
	queue_work(hisi_hba->wq, &phy->phyup_ws);

end:
	/* Ack only the D2H bit we consumed, then restore the old mask */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);

	return res;
}

/* Handlers in mbigen vector order; see interrupt_init_v2_hw() */
static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
	int_phy_updown_v2_hw,
	int_chnl_int_v2_hw,
};

static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
	fatal_ecc_int_v2_hw,
	fatal_axi_int_v2_hw
};

/*
 * There is a limitation in the hip06 chipset that we need
 * to map in all mbigen interrupts, even if they are not used.
3249 */ 3250 static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) 3251 { 3252 struct platform_device *pdev = hisi_hba->platform_dev; 3253 struct device *dev = &pdev->dev; 3254 int i, irq, rc, irq_map[128]; 3255 3256 3257 for (i = 0; i < 128; i++) 3258 irq_map[i] = platform_get_irq(pdev, i); 3259 3260 for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) { 3261 int idx = i; 3262 3263 irq = irq_map[idx + 1]; /* Phy up/down is irq1 */ 3264 if (!irq) { 3265 dev_err(dev, "irq init: fail map phy interrupt %d\n", 3266 idx); 3267 return -ENOENT; 3268 } 3269 3270 rc = devm_request_irq(dev, irq, phy_interrupts[i], 0, 3271 DRV_NAME " phy", hisi_hba); 3272 if (rc) { 3273 dev_err(dev, "irq init: could not request " 3274 "phy interrupt %d, rc=%d\n", 3275 irq, rc); 3276 return -ENOENT; 3277 } 3278 } 3279 3280 for (i = 0; i < hisi_hba->n_phy; i++) { 3281 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 3282 int idx = i + 72; /* First SATA interrupt is irq72 */ 3283 3284 irq = irq_map[idx]; 3285 if (!irq) { 3286 dev_err(dev, "irq init: fail map phy interrupt %d\n", 3287 idx); 3288 return -ENOENT; 3289 } 3290 3291 rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0, 3292 DRV_NAME " sata", phy); 3293 if (rc) { 3294 dev_err(dev, "irq init: could not request " 3295 "sata interrupt %d, rc=%d\n", 3296 irq, rc); 3297 return -ENOENT; 3298 } 3299 } 3300 3301 for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) { 3302 int idx = i; 3303 3304 irq = irq_map[idx + 81]; 3305 if (!irq) { 3306 dev_err(dev, "irq init: fail map fatal interrupt %d\n", 3307 idx); 3308 return -ENOENT; 3309 } 3310 3311 rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0, 3312 DRV_NAME " fatal", hisi_hba); 3313 if (rc) { 3314 dev_err(dev, 3315 "irq init: could not request fatal interrupt %d, rc=%d\n", 3316 irq, rc); 3317 return -ENOENT; 3318 } 3319 } 3320 3321 for (i = 0; i < hisi_hba->queue_count; i++) { 3322 int idx = i + 96; /* First cq interrupt is irq96 */ 3323 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 3324 struct tasklet_struct *t 
= &cq->tasklet; 3325 3326 irq = irq_map[idx]; 3327 if (!irq) { 3328 dev_err(dev, 3329 "irq init: could not map cq interrupt %d\n", 3330 idx); 3331 return -ENOENT; 3332 } 3333 rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0, 3334 DRV_NAME " cq", &hisi_hba->cq[i]); 3335 if (rc) { 3336 dev_err(dev, 3337 "irq init: could not request cq interrupt %d, rc=%d\n", 3338 irq, rc); 3339 return -ENOENT; 3340 } 3341 tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq); 3342 } 3343 3344 return 0; 3345 } 3346 3347 static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) 3348 { 3349 int rc; 3350 3351 memset(hisi_hba->sata_dev_bitmap, 0, sizeof(hisi_hba->sata_dev_bitmap)); 3352 3353 rc = hw_init_v2_hw(hisi_hba); 3354 if (rc) 3355 return rc; 3356 3357 rc = interrupt_init_v2_hw(hisi_hba); 3358 if (rc) 3359 return rc; 3360 3361 return 0; 3362 } 3363 3364 static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba) 3365 { 3366 struct platform_device *pdev = hisi_hba->platform_dev; 3367 int i; 3368 3369 for (i = 0; i < hisi_hba->queue_count; i++) 3370 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); 3371 3372 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); 3373 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); 3374 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); 3375 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); 3376 3377 for (i = 0; i < hisi_hba->n_phy; i++) { 3378 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); 3379 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff); 3380 } 3381 3382 for (i = 0; i < 128; i++) 3383 synchronize_irq(platform_get_irq(pdev, i)); 3384 } 3385 3386 static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) 3387 { 3388 struct device *dev = hisi_hba->dev; 3389 u32 old_state, state; 3390 int rc, cnt; 3391 int phy_no; 3392 3393 old_state = hisi_sas_read32(hisi_hba, PHY_STATE); 3394 3395 interrupt_disable_v2_hw(hisi_hba); 3396 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); 3397 3398 
	stop_phys_v2_hw(hisi_hba);

	mdelay(10);

	/* Disable the AXI master so outstanding transactions can drain */
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);

	/* wait until bus idle */
	cnt = 0;
	while (1) {
		u32 status = hisi_sas_read32_relaxed(hisi_hba,
				AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN);

		/* NOTE(review): 0x3 presumably means all channels idle - confirm against HW spec */
		if (status == 0x3)
			break;

		udelay(10);
		if (cnt++ > 10) {
			dev_info(dev, "wait axi bus state to idle timeout!\n");
			return -1;
		}
	}

	hisi_sas_init_mem(hisi_hba);

	rc = hw_init_v2_hw(hisi_hba);
	if (rc)
		return rc;

	phys_reject_stp_links_v2_hw(hisi_hba);

	/* Re-enable the PHYs */
	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (sas_phy->enabled)
			start_phy_v2_hw(hisi_hba, phy_no);
	}

	/* Wait for the PHYs to come up and read the PHY state */
	msleep(1000);

	state = hisi_sas_read32(hisi_hba, PHY_STATE);

	/* Report any topology change across the reset to libsas */
	hisi_sas_rescan_topology(hisi_hba, old_state, state);

	return 0;
}

/* v2 hardware operations table plugged into the common hisi_sas glue */
static const struct hisi_sas_hw hisi_sas_v2_hw = {
	.hw_init = hisi_sas_v2_init,
	.setup_itct = setup_itct_v2_hw,
	.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
	.alloc_dev = alloc_dev_quirk_v2_hw,
	.sl_notify = sl_notify_v2_hw,
	.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
	.free_device = free_device_v2_hw,
	.prep_smp = prep_smp_v2_hw,
	.prep_ssp = prep_ssp_v2_hw,
	.prep_stp = prep_ata_v2_hw,
	.prep_abort = prep_abort_v2_hw,
	.get_free_slot = get_free_slot_v2_hw,
	.start_delivery = start_delivery_v2_hw,
	.slot_complete = slot_complete_v2_hw,
	.phys_init = phys_init_v2_hw,
	.phy_enable = enable_phy_v2_hw,
	.phy_disable = disable_phy_v2_hw,
	.phy_hard_reset = phy_hard_reset_v2_hw,
	.phy_set_linkrate = phy_set_linkrate_v2_hw,
	.phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
	.soft_reset = soft_reset_v2_hw,
};

static int hisi_sas_v2_probe(struct platform_device *pdev)
{
	/*
	 * Check if we should defer the probe before we probe the
	 * upper layer, as it's hard to defer later on.
	 */
	int ret = platform_get_irq(pdev, 0);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "cannot obtain irq\n");
		return ret;
	}

	return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
}

static int hisi_sas_v2_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	/* Stop the driver's internal timer before common teardown */
	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	return hisi_sas_remove(pdev);
}

static const struct of_device_id sas_v2_of_match[] = {
	{ .compatible = "hisilicon,hip06-sas-v2",},
	{ .compatible = "hisilicon,hip07-sas-v2",},
	{},
};
MODULE_DEVICE_TABLE(of, sas_v2_of_match);

static const struct acpi_device_id sas_v2_acpi_match[] = {
	{ "HISI0162", 0 },
	{ }
};

MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match);

static struct platform_driver hisi_sas_v2_driver = {
	.probe = hisi_sas_v2_probe,
	.remove = hisi_sas_v2_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sas_v2_of_match,
		.acpi_match_table = ACPI_PTR(sas_v2_acpi_match),
	},
};

module_platform_driver(hisi_sas_v2_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver");
MODULE_ALIAS("platform:" DRV_NAME);