1 /* 2 * Copyright (c) 2016 Linaro Ltd. 3 * Copyright (c) 2016 Hisilicon Limited. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 */ 11 12 #include "hisi_sas.h" 13 #define DRV_NAME "hisi_sas_v2_hw" 14 15 /* global registers need init*/ 16 #define DLVRY_QUEUE_ENABLE 0x0 17 #define IOST_BASE_ADDR_LO 0x8 18 #define IOST_BASE_ADDR_HI 0xc 19 #define ITCT_BASE_ADDR_LO 0x10 20 #define ITCT_BASE_ADDR_HI 0x14 21 #define IO_BROKEN_MSG_ADDR_LO 0x18 22 #define IO_BROKEN_MSG_ADDR_HI 0x1c 23 #define PHY_CONTEXT 0x20 24 #define PHY_STATE 0x24 25 #define PHY_PORT_NUM_MA 0x28 26 #define PORT_STATE 0x2c 27 #define PORT_STATE_PHY8_PORT_NUM_OFF 16 28 #define PORT_STATE_PHY8_PORT_NUM_MSK (0xf << PORT_STATE_PHY8_PORT_NUM_OFF) 29 #define PORT_STATE_PHY8_CONN_RATE_OFF 20 30 #define PORT_STATE_PHY8_CONN_RATE_MSK (0xf << PORT_STATE_PHY8_CONN_RATE_OFF) 31 #define PHY_CONN_RATE 0x30 32 #define HGC_TRANS_TASK_CNT_LIMIT 0x38 33 #define AXI_AHB_CLK_CFG 0x3c 34 #define ITCT_CLR 0x44 35 #define ITCT_CLR_EN_OFF 16 36 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) 37 #define ITCT_DEV_OFF 0 38 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) 39 #define AXI_USER1 0x48 40 #define AXI_USER2 0x4c 41 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 42 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c 43 #define SATA_INITI_D2H_STORE_ADDR_LO 0x60 44 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64 45 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 46 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 47 #define HGC_GET_ITV_TIME 0x90 48 #define DEVICE_MSG_WORK_MODE 0x94 49 #define OPENA_WT_CONTI_TIME 0x9c 50 #define I_T_NEXUS_LOSS_TIME 0xa0 51 #define MAX_CON_TIME_LIMIT_TIME 0xa4 52 #define BUS_INACTIVE_LIMIT_TIME 0xa8 53 #define REJECT_TO_OPEN_LIMIT_TIME 0xac 54 #define CFG_AGING_TIME 0xbc 55 #define HGC_DFX_CFG2 0xc0 56 #define 
HGC_IOMB_PROC1_STATUS 0x104 57 #define CFG_1US_TIMER_TRSH 0xcc 58 #define HGC_LM_DFX_STATUS2 0x128 59 #define HGC_LM_DFX_STATUS2_IOSTLIST_OFF 0 60 #define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \ 61 HGC_LM_DFX_STATUS2_IOSTLIST_OFF) 62 #define HGC_LM_DFX_STATUS2_ITCTLIST_OFF 12 63 #define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \ 64 HGC_LM_DFX_STATUS2_ITCTLIST_OFF) 65 #define HGC_CQE_ECC_ADDR 0x13c 66 #define HGC_CQE_ECC_1B_ADDR_OFF 0 67 #define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF) 68 #define HGC_CQE_ECC_MB_ADDR_OFF 8 69 #define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF) 70 #define HGC_IOST_ECC_ADDR 0x140 71 #define HGC_IOST_ECC_1B_ADDR_OFF 0 72 #define HGC_IOST_ECC_1B_ADDR_MSK (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF) 73 #define HGC_IOST_ECC_MB_ADDR_OFF 16 74 #define HGC_IOST_ECC_MB_ADDR_MSK (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF) 75 #define HGC_DQE_ECC_ADDR 0x144 76 #define HGC_DQE_ECC_1B_ADDR_OFF 0 77 #define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF) 78 #define HGC_DQE_ECC_MB_ADDR_OFF 16 79 #define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF) 80 #define HGC_INVLD_DQE_INFO 0x148 81 #define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9 82 #define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF) 83 #define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18 84 #define HGC_ITCT_ECC_ADDR 0x150 85 #define HGC_ITCT_ECC_1B_ADDR_OFF 0 86 #define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \ 87 HGC_ITCT_ECC_1B_ADDR_OFF) 88 #define HGC_ITCT_ECC_MB_ADDR_OFF 16 89 #define HGC_ITCT_ECC_MB_ADDR_MSK (0x3ff << \ 90 HGC_ITCT_ECC_MB_ADDR_OFF) 91 #define HGC_AXI_FIFO_ERR_INFO 0x154 92 #define AXI_ERR_INFO_OFF 0 93 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) 94 #define FIFO_ERR_INFO_OFF 8 95 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF) 96 #define INT_COAL_EN 0x19c 97 #define OQ_INT_COAL_TIME 0x1a0 98 #define OQ_INT_COAL_CNT 0x1a4 99 #define ENT_INT_COAL_TIME 0x1a8 100 #define ENT_INT_COAL_CNT 0x1ac 101 #define OQ_INT_SRC 0x1b0 
102 #define OQ_INT_SRC_MSK 0x1b4 103 #define ENT_INT_SRC1 0x1b8 104 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0 105 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF) 106 #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8 107 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) 108 #define ENT_INT_SRC2 0x1bc 109 #define ENT_INT_SRC3 0x1c0 110 #define ENT_INT_SRC3_WP_DEPTH_OFF 8 111 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9 112 #define ENT_INT_SRC3_RP_DEPTH_OFF 10 113 #define ENT_INT_SRC3_AXI_OFF 11 114 #define ENT_INT_SRC3_FIFO_OFF 12 115 #define ENT_INT_SRC3_LM_OFF 14 116 #define ENT_INT_SRC3_ITC_INT_OFF 15 117 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) 118 #define ENT_INT_SRC3_ABT_OFF 16 119 #define ENT_INT_SRC_MSK1 0x1c4 120 #define ENT_INT_SRC_MSK2 0x1c8 121 #define ENT_INT_SRC_MSK3 0x1cc 122 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 123 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) 124 #define SAS_ECC_INTR 0x1e8 125 #define SAS_ECC_INTR_DQE_ECC_1B_OFF 0 126 #define SAS_ECC_INTR_DQE_ECC_MB_OFF 1 127 #define SAS_ECC_INTR_IOST_ECC_1B_OFF 2 128 #define SAS_ECC_INTR_IOST_ECC_MB_OFF 3 129 #define SAS_ECC_INTR_ITCT_ECC_MB_OFF 4 130 #define SAS_ECC_INTR_ITCT_ECC_1B_OFF 5 131 #define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF 6 132 #define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF 7 133 #define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF 8 134 #define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF 9 135 #define SAS_ECC_INTR_CQE_ECC_1B_OFF 10 136 #define SAS_ECC_INTR_CQE_ECC_MB_OFF 11 137 #define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF 12 138 #define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF 13 139 #define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF 14 140 #define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF 15 141 #define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF 16 142 #define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF 17 143 #define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF 18 144 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19 145 #define SAS_ECC_INTR_MSK 0x1ec 146 #define HGC_ERR_STAT_EN 
0x238 147 #define DLVRY_Q_0_BASE_ADDR_LO 0x260 148 #define DLVRY_Q_0_BASE_ADDR_HI 0x264 149 #define DLVRY_Q_0_DEPTH 0x268 150 #define DLVRY_Q_0_WR_PTR 0x26c 151 #define DLVRY_Q_0_RD_PTR 0x270 152 #define HYPER_STREAM_ID_EN_CFG 0xc80 153 #define OQ0_INT_SRC_MSK 0xc90 154 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0 155 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4 156 #define COMPL_Q_0_DEPTH 0x4e8 157 #define COMPL_Q_0_WR_PTR 0x4ec 158 #define COMPL_Q_0_RD_PTR 0x4f0 159 #define HGC_RXM_DFX_STATUS14 0xae8 160 #define HGC_RXM_DFX_STATUS14_MEM0_OFF 0 161 #define HGC_RXM_DFX_STATUS14_MEM0_MSK (0x1ff << \ 162 HGC_RXM_DFX_STATUS14_MEM0_OFF) 163 #define HGC_RXM_DFX_STATUS14_MEM1_OFF 9 164 #define HGC_RXM_DFX_STATUS14_MEM1_MSK (0x1ff << \ 165 HGC_RXM_DFX_STATUS14_MEM1_OFF) 166 #define HGC_RXM_DFX_STATUS14_MEM2_OFF 18 167 #define HGC_RXM_DFX_STATUS14_MEM2_MSK (0x1ff << \ 168 HGC_RXM_DFX_STATUS14_MEM2_OFF) 169 #define HGC_RXM_DFX_STATUS15 0xaec 170 #define HGC_RXM_DFX_STATUS15_MEM3_OFF 0 171 #define HGC_RXM_DFX_STATUS15_MEM3_MSK (0x1ff << \ 172 HGC_RXM_DFX_STATUS15_MEM3_OFF) 173 /* phy registers need init */ 174 #define PORT_BASE (0x2000) 175 176 #define PHY_CFG (PORT_BASE + 0x0) 177 #define HARD_PHY_LINKRATE (PORT_BASE + 0x4) 178 #define PHY_CFG_ENA_OFF 0 179 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) 180 #define PHY_CFG_DC_OPT_OFF 2 181 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) 182 #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) 183 #define PROG_PHY_LINK_RATE_MAX_OFF 0 184 #define PROG_PHY_LINK_RATE_MAX_MSK (0xff << PROG_PHY_LINK_RATE_MAX_OFF) 185 #define PHY_CTRL (PORT_BASE + 0x14) 186 #define PHY_CTRL_RESET_OFF 0 187 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) 188 #define SAS_PHY_CTRL (PORT_BASE + 0x20) 189 #define SL_CFG (PORT_BASE + 0x84) 190 #define PHY_PCN (PORT_BASE + 0x44) 191 #define SL_TOUT_CFG (PORT_BASE + 0x8c) 192 #define SL_CONTROL (PORT_BASE + 0x94) 193 #define SL_CONTROL_NOTIFY_EN_OFF 0 194 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << 
SL_CONTROL_NOTIFY_EN_OFF) 195 #define SL_CONTROL_CTA_OFF 17 196 #define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF) 197 #define RX_PRIMS_STATUS (PORT_BASE + 0x98) 198 #define RX_BCAST_CHG_OFF 1 199 #define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF) 200 #define TX_ID_DWORD0 (PORT_BASE + 0x9c) 201 #define TX_ID_DWORD1 (PORT_BASE + 0xa0) 202 #define TX_ID_DWORD2 (PORT_BASE + 0xa4) 203 #define TX_ID_DWORD3 (PORT_BASE + 0xa8) 204 #define TX_ID_DWORD4 (PORT_BASE + 0xaC) 205 #define TX_ID_DWORD5 (PORT_BASE + 0xb0) 206 #define TX_ID_DWORD6 (PORT_BASE + 0xb4) 207 #define TXID_AUTO (PORT_BASE + 0xb8) 208 #define TXID_AUTO_CT3_OFF 1 209 #define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF) 210 #define TXID_AUTO_CTB_OFF 11 211 #define TXID_AUTO_CTB_MSK (0x1 << TXID_AUTO_CTB_OFF) 212 #define TX_HARDRST_OFF 2 213 #define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) 214 #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) 215 #define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) 216 #define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) 217 #define RX_IDAF_DWORD3 (PORT_BASE + 0xd0) 218 #define RX_IDAF_DWORD4 (PORT_BASE + 0xd4) 219 #define RX_IDAF_DWORD5 (PORT_BASE + 0xd8) 220 #define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) 221 #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) 222 #define CON_CONTROL (PORT_BASE + 0x118) 223 #define CON_CONTROL_CFG_OPEN_ACC_STP_OFF 0 224 #define CON_CONTROL_CFG_OPEN_ACC_STP_MSK \ 225 (0x01 << CON_CONTROL_CFG_OPEN_ACC_STP_OFF) 226 #define DONE_RECEIVED_TIME (PORT_BASE + 0x11c) 227 #define CHL_INT0 (PORT_BASE + 0x1b4) 228 #define CHL_INT0_HOTPLUG_TOUT_OFF 0 229 #define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF) 230 #define CHL_INT0_SL_RX_BCST_ACK_OFF 1 231 #define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF) 232 #define CHL_INT0_SL_PHY_ENABLE_OFF 2 233 #define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF) 234 #define CHL_INT0_NOT_RDY_OFF 4 235 #define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF) 236 #define CHL_INT0_PHY_RDY_OFF 5 237 #define 
CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF) 238 #define CHL_INT1 (PORT_BASE + 0x1b8) 239 #define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15 240 #define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) 241 #define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 242 #define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF) 243 #define CHL_INT2 (PORT_BASE + 0x1bc) 244 #define CHL_INT0_MSK (PORT_BASE + 0x1c0) 245 #define CHL_INT1_MSK (PORT_BASE + 0x1c4) 246 #define CHL_INT2_MSK (PORT_BASE + 0x1c8) 247 #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) 248 #define DMA_TX_DFX0 (PORT_BASE + 0x200) 249 #define DMA_TX_DFX1 (PORT_BASE + 0x204) 250 #define DMA_TX_DFX1_IPTT_OFF 0 251 #define DMA_TX_DFX1_IPTT_MSK (0xffff << DMA_TX_DFX1_IPTT_OFF) 252 #define DMA_TX_FIFO_DFX0 (PORT_BASE + 0x240) 253 #define PORT_DFX0 (PORT_BASE + 0x258) 254 #define LINK_DFX2 (PORT_BASE + 0X264) 255 #define LINK_DFX2_RCVR_HOLD_STS_OFF 9 256 #define LINK_DFX2_RCVR_HOLD_STS_MSK (0x1 << LINK_DFX2_RCVR_HOLD_STS_OFF) 257 #define LINK_DFX2_SEND_HOLD_STS_OFF 10 258 #define LINK_DFX2_SEND_HOLD_STS_MSK (0x1 << LINK_DFX2_SEND_HOLD_STS_OFF) 259 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) 260 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) 261 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) 262 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) 263 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) 264 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) 265 #define DMA_TX_STATUS (PORT_BASE + 0x2d0) 266 #define DMA_TX_STATUS_BUSY_OFF 0 267 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) 268 #define DMA_RX_STATUS (PORT_BASE + 0x2e8) 269 #define DMA_RX_STATUS_BUSY_OFF 0 270 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) 271 272 #define AXI_CFG (0x5100) 273 #define AM_CFG_MAX_TRANS (0x5010) 274 #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) 275 276 #define AXI_MASTER_CFG_BASE (0x5000) 277 #define AM_CTRL_GLOBAL (0x0) 278 #define AM_CURR_TRANS_RETURN (0x150) 279 280 /* HW dma 
structures */ 281 /* Delivery queue header */ 282 /* dw0 */ 283 #define CMD_HDR_ABORT_FLAG_OFF 0 284 #define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF) 285 #define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2 286 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF) 287 #define CMD_HDR_RESP_REPORT_OFF 5 288 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) 289 #define CMD_HDR_TLR_CTRL_OFF 6 290 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) 291 #define CMD_HDR_PORT_OFF 18 292 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) 293 #define CMD_HDR_PRIORITY_OFF 27 294 #define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF) 295 #define CMD_HDR_CMD_OFF 29 296 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) 297 /* dw1 */ 298 #define CMD_HDR_DIR_OFF 5 299 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) 300 #define CMD_HDR_RESET_OFF 7 301 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF) 302 #define CMD_HDR_VDTL_OFF 10 303 #define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) 304 #define CMD_HDR_FRAME_TYPE_OFF 11 305 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) 306 #define CMD_HDR_DEV_ID_OFF 16 307 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) 308 /* dw2 */ 309 #define CMD_HDR_CFL_OFF 0 310 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) 311 #define CMD_HDR_NCQ_TAG_OFF 10 312 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) 313 #define CMD_HDR_MRFL_OFF 15 314 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) 315 #define CMD_HDR_SG_MOD_OFF 24 316 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) 317 #define CMD_HDR_FIRST_BURST_OFF 26 318 #define CMD_HDR_FIRST_BURST_MSK (0x1 << CMD_HDR_SG_MOD_OFF) 319 /* dw3 */ 320 #define CMD_HDR_IPTT_OFF 0 321 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) 322 /* dw6 */ 323 #define CMD_HDR_DIF_SGL_LEN_OFF 0 324 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) 325 #define CMD_HDR_DATA_SGL_LEN_OFF 16 326 #define 
CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) 327 #define CMD_HDR_ABORT_IPTT_OFF 16 328 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF) 329 330 /* Completion header */ 331 /* dw0 */ 332 #define CMPLT_HDR_ERR_PHASE_OFF 2 333 #define CMPLT_HDR_ERR_PHASE_MSK (0xff << CMPLT_HDR_ERR_PHASE_OFF) 334 #define CMPLT_HDR_RSPNS_XFRD_OFF 10 335 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) 336 #define CMPLT_HDR_ERX_OFF 12 337 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) 338 #define CMPLT_HDR_ABORT_STAT_OFF 13 339 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF) 340 /* abort_stat */ 341 #define STAT_IO_NOT_VALID 0x1 342 #define STAT_IO_NO_DEVICE 0x2 343 #define STAT_IO_COMPLETE 0x3 344 #define STAT_IO_ABORTED 0x4 345 /* dw1 */ 346 #define CMPLT_HDR_IPTT_OFF 0 347 #define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) 348 #define CMPLT_HDR_DEV_ID_OFF 16 349 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) 350 351 /* ITCT header */ 352 /* qw0 */ 353 #define ITCT_HDR_DEV_TYPE_OFF 0 354 #define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF) 355 #define ITCT_HDR_VALID_OFF 2 356 #define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF) 357 #define ITCT_HDR_MCR_OFF 5 358 #define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) 359 #define ITCT_HDR_VLN_OFF 9 360 #define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) 361 #define ITCT_HDR_SMP_TIMEOUT_OFF 16 362 #define ITCT_HDR_SMP_TIMEOUT_8US 1 363 #define ITCT_HDR_SMP_TIMEOUT (ITCT_HDR_SMP_TIMEOUT_8US * \ 364 250) /* 2ms */ 365 #define ITCT_HDR_AWT_CONTINUE_OFF 25 366 #define ITCT_HDR_PORT_ID_OFF 28 367 #define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) 368 /* qw2 */ 369 #define ITCT_HDR_INLT_OFF 0 370 #define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF) 371 #define ITCT_HDR_BITLT_OFF 16 372 #define ITCT_HDR_BITLT_MSK (0xffffULL << ITCT_HDR_BITLT_OFF) 373 #define ITCT_HDR_MCTLT_OFF 32 374 #define ITCT_HDR_MCTLT_MSK (0xffffULL << 
ITCT_HDR_MCTLT_OFF) 375 #define ITCT_HDR_RTOLT_OFF 48 376 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) 377 378 #define HISI_SAS_FATAL_INT_NR 2 379 380 struct hisi_sas_complete_v2_hdr { 381 __le32 dw0; 382 __le32 dw1; 383 __le32 act; 384 __le32 dw3; 385 }; 386 387 struct hisi_sas_err_record_v2 { 388 /* dw0 */ 389 __le32 trans_tx_fail_type; 390 391 /* dw1 */ 392 __le32 trans_rx_fail_type; 393 394 /* dw2 */ 395 __le16 dma_tx_err_type; 396 __le16 sipc_rx_err_type; 397 398 /* dw3 */ 399 __le32 dma_rx_err_type; 400 }; 401 402 enum { 403 HISI_SAS_PHY_PHY_UPDOWN, 404 HISI_SAS_PHY_CHNL_INT, 405 HISI_SAS_PHY_INT_NR 406 }; 407 408 enum { 409 TRANS_TX_FAIL_BASE = 0x0, /* dw0 */ 410 TRANS_RX_FAIL_BASE = 0x20, /* dw1 */ 411 DMA_TX_ERR_BASE = 0x40, /* dw2 bit 15-0 */ 412 SIPC_RX_ERR_BASE = 0x50, /* dw2 bit 31-16*/ 413 DMA_RX_ERR_BASE = 0x60, /* dw3 */ 414 415 /* trans tx*/ 416 TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */ 417 TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */ 418 TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */ 419 TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */ 420 TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */ 421 RESERVED0, /* 0x5 */ 422 TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */ 423 TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */ 424 TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */ 425 TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */ 426 TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */ 427 TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */ 428 TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */ 429 TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */ 430 TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */ 431 TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */ 432 TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */ 433 TRANS_TX_ERR_FRAME_TXED, /* 0x11 */ 434 TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */ 435 TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */ 436 TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */ 437 
TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */ 438 TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp*/ 439 TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */ 440 TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */ 441 TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */ 442 TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp*/ 443 TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp*/ 444 /*IO_TX_ERR_WITH_R_ERR_RECEVIED, [> 0x1b for sata/stp<] */ 445 TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */ 446 /*IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */ 447 TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */ 448 TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */ 449 /*IO_TX_ERR_WITH_SYNC_RXD, [> 0x1e <] for sata/stp */ 450 TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */ 451 452 /* trans rx */ 453 TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x20 */ 454 TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x21 for sata/stp */ 455 TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x22 for ssp/smp */ 456 /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x22 <] for sata/stp */ 457 TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x23 for sata/stp */ 458 TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x24 for sata/stp */ 459 TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x25 for smp */ 460 /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x25 <] for sata/stp */ 461 TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x26 for sata/stp*/ 462 TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x27 */ 463 TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x28 */ 464 TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x29 */ 465 TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x2a */ 466 RESERVED1, /* 0x2b */ 467 TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x2c */ 468 TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x2d */ 469 TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x2e */ 470 TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x2f */ 471 TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x30 for ssp/smp */ 472 TRANS_RX_ERR_WITH_BAD_HASH, /* 0x31 for ssp */ 473 /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x31 <] for sata/stp */ 474 
TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x32 for ssp*/ 475 /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x32 <] for sata/stp */ 476 TRANS_RX_SSP_FRM_LEN_ERR, /* 0x33 for ssp */ 477 /*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x33 <] for sata */ 478 RESERVED2, /* 0x34 */ 479 RESERVED3, /* 0x35 */ 480 RESERVED4, /* 0x36 */ 481 RESERVED5, /* 0x37 */ 482 TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x38 */ 483 TRANS_RX_SMP_FRM_LEN_ERR, /* 0x39 */ 484 TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x3a */ 485 RESERVED6, /* 0x3b */ 486 RESERVED7, /* 0x3c */ 487 RESERVED8, /* 0x3d */ 488 RESERVED9, /* 0x3e */ 489 TRANS_RX_R_ERR, /* 0x3f */ 490 491 /* dma tx */ 492 DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x40 */ 493 DMA_TX_DIF_APP_ERR, /* 0x41 */ 494 DMA_TX_DIF_RPP_ERR, /* 0x42 */ 495 DMA_TX_DATA_SGL_OVERFLOW, /* 0x43 */ 496 DMA_TX_DIF_SGL_OVERFLOW, /* 0x44 */ 497 DMA_TX_UNEXP_XFER_ERR, /* 0x45 */ 498 DMA_TX_UNEXP_RETRANS_ERR, /* 0x46 */ 499 DMA_TX_XFER_LEN_OVERFLOW, /* 0x47 */ 500 DMA_TX_XFER_OFFSET_ERR, /* 0x48 */ 501 DMA_TX_RAM_ECC_ERR, /* 0x49 */ 502 DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x4a */ 503 DMA_TX_MAX_ERR_CODE, 504 505 /* sipc rx */ 506 SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x50 */ 507 SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x51 */ 508 SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x52 */ 509 SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x53 */ 510 SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x54 */ 511 SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x55 */ 512 SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x56 */ 513 SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x57 */ 514 SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x58 */ 515 SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x59 */ 516 SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x5a */ 517 SIPC_RX_MAX_ERR_CODE, 518 519 /* dma rx */ 520 DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x60 */ 521 DMA_RX_DIF_APP_ERR, /* 0x61 */ 522 DMA_RX_DIF_RPP_ERR, /* 0x62 */ 523 DMA_RX_DATA_SGL_OVERFLOW, /* 0x63 */ 524 DMA_RX_DIF_SGL_OVERFLOW, /* 0x64 */ 525 DMA_RX_DATA_LEN_OVERFLOW, /* 0x65 */ 526 DMA_RX_DATA_LEN_UNDERFLOW, /* 0x66 */ 527 DMA_RX_DATA_OFFSET_ERR, /* 
0x67 */ 528 RESERVED10, /* 0x68 */ 529 DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x69 */ 530 DMA_RX_RESP_BUF_OVERFLOW, /* 0x6a */ 531 DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x6b */ 532 DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x6c */ 533 DMA_RX_UNEXP_RDFRAME_ERR, /* 0x6d */ 534 DMA_RX_PIO_DATA_LEN_ERR, /* 0x6e */ 535 DMA_RX_RDSETUP_STATUS_ERR, /* 0x6f */ 536 DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x70 */ 537 DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x71 */ 538 DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x72 */ 539 DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x73 */ 540 DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x74 */ 541 DMA_RX_RDSETUP_OFFSET_ERR, /* 0x75 */ 542 DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x76 */ 543 DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x77 */ 544 DMA_RX_RAM_ECC_ERR, /* 0x78 */ 545 DMA_RX_UNKNOWN_FRM_ERR, /* 0x79 */ 546 DMA_RX_MAX_ERR_CODE, 547 }; 548 549 #define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096 550 #define HISI_MAX_SATA_SUPPORT_V2_HW (HISI_SAS_COMMAND_ENTRIES_V2_HW/64 - 1) 551 552 #define DIR_NO_DATA 0 553 #define DIR_TO_INI 1 554 #define DIR_TO_DEVICE 2 555 #define DIR_RESERVED 3 556 557 #define SATA_PROTOCOL_NONDATA 0x1 558 #define SATA_PROTOCOL_PIO 0x2 559 #define SATA_PROTOCOL_DMA 0x4 560 #define SATA_PROTOCOL_FPDMA 0x8 561 #define SATA_PROTOCOL_ATAPI 0x10 562 563 #define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \ 564 err_phase == 0x4 || err_phase == 0x8 ||\ 565 err_phase == 0x6 || err_phase == 0xa) 566 #define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \ 567 err_phase == 0x20 || err_phase == 0x40) 568 569 static void link_timeout_disable_link(unsigned long data); 570 571 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) 572 { 573 void __iomem *regs = hisi_hba->regs + off; 574 575 return readl(regs); 576 } 577 578 static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) 579 { 580 void __iomem *regs = hisi_hba->regs + off; 581 582 return readl_relaxed(regs); 583 } 584 585 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) 586 { 587 void __iomem *regs = hisi_hba->regs 
+ off; 588 589 writel(val, regs); 590 } 591 592 static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, 593 u32 off, u32 val) 594 { 595 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; 596 597 writel(val, regs); 598 } 599 600 static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, 601 int phy_no, u32 off) 602 { 603 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; 604 605 return readl(regs); 606 } 607 608 /* This function needs to be protected from pre-emption. */ 609 static int 610 slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, 611 struct domain_device *device) 612 { 613 int sata_dev = dev_is_sata(device); 614 void *bitmap = hisi_hba->slot_index_tags; 615 struct hisi_sas_device *sas_dev = device->lldd_dev; 616 int sata_idx = sas_dev->sata_idx; 617 int start, end; 618 619 if (!sata_dev) { 620 /* 621 * STP link SoC bug workaround: index starts from 1. 622 * additionally, we can only allocate odd IPTT(1~4095) 623 * for SAS/SMP device. 624 */ 625 start = 1; 626 end = hisi_hba->slot_index_count; 627 } else { 628 if (sata_idx >= HISI_MAX_SATA_SUPPORT_V2_HW) 629 return -EINVAL; 630 631 /* 632 * For SATA device: allocate even IPTT in this interval 633 * [64*(sata_idx+1), 64*(sata_idx+2)], then each SATA device 634 * own 32 IPTTs. IPTT 0 shall not be used duing to STP link 635 * SoC bug workaround. So we ignore the first 32 even IPTTs. 636 */ 637 start = 64 * (sata_idx + 1); 638 end = 64 * (sata_idx + 2); 639 } 640 641 while (1) { 642 start = find_next_zero_bit(bitmap, 643 hisi_hba->slot_index_count, start); 644 if (start >= end) 645 return -SAS_QUEUE_FULL; 646 /* 647 * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0. 
648 */ 649 if (sata_dev ^ (start & 1)) 650 break; 651 start++; 652 } 653 654 set_bit(start, bitmap); 655 *slot_idx = start; 656 return 0; 657 } 658 659 static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx) 660 { 661 unsigned int index; 662 struct device *dev = &hisi_hba->pdev->dev; 663 void *bitmap = hisi_hba->sata_dev_bitmap; 664 665 index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW); 666 if (index >= HISI_MAX_SATA_SUPPORT_V2_HW) { 667 dev_warn(dev, "alloc sata index failed, index=%d\n", index); 668 return false; 669 } 670 671 set_bit(index, bitmap); 672 *idx = index; 673 return true; 674 } 675 676 677 static struct 678 hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) 679 { 680 struct hisi_hba *hisi_hba = device->port->ha->lldd_ha; 681 struct hisi_sas_device *sas_dev = NULL; 682 int i, sata_dev = dev_is_sata(device); 683 int sata_idx = -1; 684 685 spin_lock(&hisi_hba->lock); 686 687 if (sata_dev) 688 if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx)) 689 goto out; 690 691 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 692 /* 693 * SATA device id bit0 should be 0 694 */ 695 if (sata_dev && (i & 1)) 696 continue; 697 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 698 hisi_hba->devices[i].device_id = i; 699 sas_dev = &hisi_hba->devices[i]; 700 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 701 sas_dev->dev_type = device->dev_type; 702 sas_dev->hisi_hba = hisi_hba; 703 sas_dev->sas_device = device; 704 sas_dev->sata_idx = sata_idx; 705 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 706 break; 707 } 708 } 709 710 out: 711 spin_unlock(&hisi_hba->lock); 712 713 return sas_dev; 714 } 715 716 static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 717 { 718 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 719 720 cfg &= ~PHY_CFG_DC_OPT_MSK; 721 cfg |= 1 << PHY_CFG_DC_OPT_OFF; 722 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 723 } 724 725 static void config_id_frame_v2_hw(struct 
hisi_hba *hisi_hba, int phy_no) 726 { 727 struct sas_identify_frame identify_frame; 728 u32 *identify_buffer; 729 730 memset(&identify_frame, 0, sizeof(identify_frame)); 731 identify_frame.dev_type = SAS_END_DEVICE; 732 identify_frame.frame_type = 0; 733 identify_frame._un1 = 1; 734 identify_frame.initiator_bits = SAS_PROTOCOL_ALL; 735 identify_frame.target_bits = SAS_PROTOCOL_NONE; 736 memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); 737 memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); 738 identify_frame.phy_id = phy_no; 739 identify_buffer = (u32 *)(&identify_frame); 740 741 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, 742 __swab32(identify_buffer[0])); 743 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, 744 __swab32(identify_buffer[1])); 745 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, 746 __swab32(identify_buffer[2])); 747 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, 748 __swab32(identify_buffer[3])); 749 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, 750 __swab32(identify_buffer[4])); 751 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, 752 __swab32(identify_buffer[5])); 753 } 754 755 static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, 756 struct hisi_sas_device *sas_dev) 757 { 758 struct domain_device *device = sas_dev->sas_device; 759 struct device *dev = &hisi_hba->pdev->dev; 760 u64 qw0, device_id = sas_dev->device_id; 761 struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; 762 struct domain_device *parent_dev = device->parent; 763 struct asd_sas_port *sas_port = device->port; 764 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 765 766 memset(itct, 0, sizeof(*itct)); 767 768 /* qw0 */ 769 qw0 = 0; 770 switch (sas_dev->dev_type) { 771 case SAS_END_DEVICE: 772 case SAS_EDGE_EXPANDER_DEVICE: 773 case SAS_FANOUT_EXPANDER_DEVICE: 774 qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; 775 break; 776 case SAS_SATA_DEV: 777 case SAS_SATA_PENDING: 778 if 
(parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) 779 qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; 780 else 781 qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF; 782 break; 783 default: 784 dev_warn(dev, "setup itct: unsupported dev type (%d)\n", 785 sas_dev->dev_type); 786 } 787 788 qw0 |= ((1 << ITCT_HDR_VALID_OFF) | 789 (device->linkrate << ITCT_HDR_MCR_OFF) | 790 (1 << ITCT_HDR_VLN_OFF) | 791 (ITCT_HDR_SMP_TIMEOUT << ITCT_HDR_SMP_TIMEOUT_OFF) | 792 (1 << ITCT_HDR_AWT_CONTINUE_OFF) | 793 (port->id << ITCT_HDR_PORT_ID_OFF)); 794 itct->qw0 = cpu_to_le64(qw0); 795 796 /* qw1 */ 797 memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE); 798 itct->sas_addr = __swab64(itct->sas_addr); 799 800 /* qw2 */ 801 if (!dev_is_sata(device)) 802 itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) | 803 (0x1ULL << ITCT_HDR_BITLT_OFF) | 804 (0x32ULL << ITCT_HDR_MCTLT_OFF) | 805 (0x1ULL << ITCT_HDR_RTOLT_OFF)); 806 } 807 808 static void free_device_v2_hw(struct hisi_hba *hisi_hba, 809 struct hisi_sas_device *sas_dev) 810 { 811 u64 dev_id = sas_dev->device_id; 812 struct device *dev = &hisi_hba->pdev->dev; 813 struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; 814 u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 815 int i; 816 817 /* SoC bug workaround */ 818 if (dev_is_sata(sas_dev->sas_device)) 819 clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap); 820 821 /* clear the itct interrupt state */ 822 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) 823 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 824 ENT_INT_SRC3_ITC_INT_MSK); 825 826 /* clear the itct int*/ 827 for (i = 0; i < 2; i++) { 828 /* clear the itct table*/ 829 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); 830 reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); 831 hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); 832 833 udelay(10); 834 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 835 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) { 836 dev_dbg(dev, "got clear ITCT done interrupt\n"); 837 838 /* 
invalid the itct state*/ 839 memset(itct, 0, sizeof(struct hisi_sas_itct)); 840 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 841 ENT_INT_SRC3_ITC_INT_MSK); 842 843 /* clear the itct */ 844 hisi_sas_write32(hisi_hba, ITCT_CLR, 0); 845 dev_dbg(dev, "clear ITCT ok\n"); 846 break; 847 } 848 } 849 } 850 851 static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) 852 { 853 int i, reset_val; 854 u32 val; 855 unsigned long end_time; 856 struct device *dev = &hisi_hba->pdev->dev; 857 858 /* The mask needs to be set depending on the number of phys */ 859 if (hisi_hba->n_phy == 9) 860 reset_val = 0x1fffff; 861 else 862 reset_val = 0x7ffff; 863 864 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); 865 866 /* Disable all of the PHYs */ 867 for (i = 0; i < hisi_hba->n_phy; i++) { 868 u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG); 869 870 phy_cfg &= ~PHY_CTRL_RESET_MSK; 871 hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg); 872 } 873 udelay(50); 874 875 /* Ensure DMA tx & rx idle */ 876 for (i = 0; i < hisi_hba->n_phy; i++) { 877 u32 dma_tx_status, dma_rx_status; 878 879 end_time = jiffies + msecs_to_jiffies(1000); 880 881 while (1) { 882 dma_tx_status = hisi_sas_phy_read32(hisi_hba, i, 883 DMA_TX_STATUS); 884 dma_rx_status = hisi_sas_phy_read32(hisi_hba, i, 885 DMA_RX_STATUS); 886 887 if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) && 888 !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK)) 889 break; 890 891 msleep(20); 892 if (time_after(jiffies, end_time)) 893 return -EIO; 894 } 895 } 896 897 /* Ensure axi bus idle */ 898 end_time = jiffies + msecs_to_jiffies(1000); 899 while (1) { 900 u32 axi_status = 901 hisi_sas_read32(hisi_hba, AXI_CFG); 902 903 if (axi_status == 0) 904 break; 905 906 msleep(20); 907 if (time_after(jiffies, end_time)) 908 return -EIO; 909 } 910 911 if (ACPI_HANDLE(dev)) { 912 acpi_status s; 913 914 s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); 915 if (ACPI_FAILURE(s)) { 916 dev_err(dev, "Reset failed\n"); 917 return -EIO; 918 } 919 } else if 
(hisi_hba->ctrl) { 920 /* reset and disable clock*/ 921 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, 922 reset_val); 923 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, 924 reset_val); 925 msleep(1); 926 regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); 927 if (reset_val != (val & reset_val)) { 928 dev_err(dev, "SAS reset fail.\n"); 929 return -EIO; 930 } 931 932 /* De-reset and enable clock*/ 933 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, 934 reset_val); 935 regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, 936 reset_val); 937 msleep(1); 938 regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, 939 &val); 940 if (val & reset_val) { 941 dev_err(dev, "SAS de-reset fail.\n"); 942 return -EIO; 943 } 944 } else 945 dev_warn(dev, "no reset method\n"); 946 947 return 0; 948 } 949 950 /* This function needs to be called after resetting SAS controller. */ 951 static void phys_reject_stp_links_v2_hw(struct hisi_hba *hisi_hba) 952 { 953 u32 cfg; 954 int phy_no; 955 956 hisi_hba->reject_stp_links_msk = (1 << hisi_hba->n_phy) - 1; 957 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 958 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, CON_CONTROL); 959 if (!(cfg & CON_CONTROL_CFG_OPEN_ACC_STP_MSK)) 960 continue; 961 962 cfg &= ~CON_CONTROL_CFG_OPEN_ACC_STP_MSK; 963 hisi_sas_phy_write32(hisi_hba, phy_no, CON_CONTROL, cfg); 964 } 965 } 966 967 static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba) 968 { 969 int phy_no; 970 u32 dma_tx_dfx1; 971 972 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 973 if (!(hisi_hba->reject_stp_links_msk & BIT(phy_no))) 974 continue; 975 976 dma_tx_dfx1 = hisi_sas_phy_read32(hisi_hba, phy_no, 977 DMA_TX_DFX1); 978 if (dma_tx_dfx1 & DMA_TX_DFX1_IPTT_MSK) { 979 u32 cfg = hisi_sas_phy_read32(hisi_hba, 980 phy_no, CON_CONTROL); 981 982 cfg |= CON_CONTROL_CFG_OPEN_ACC_STP_MSK; 983 hisi_sas_phy_write32(hisi_hba, phy_no, 984 CON_CONTROL, cfg); 985 
clear_bit(phy_no, &hisi_hba->reject_stp_links_msk); 986 } 987 } 988 } 989 990 static void init_reg_v2_hw(struct hisi_hba *hisi_hba) 991 { 992 struct device *dev = &hisi_hba->pdev->dev; 993 int i; 994 995 /* Global registers init */ 996 997 /* Deal with am-max-transmissions quirk */ 998 if (device_property_present(dev, "hip06-sas-v2-quirk-amt")) { 999 hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020); 1000 hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS, 1001 0x2020); 1002 } /* Else, use defaults -> do nothing */ 1003 1004 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 1005 (u32)((1ULL << hisi_hba->queue_count) - 1)); 1006 hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000); 1007 hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000); 1008 hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x0); 1009 hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF); 1010 hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1); 1011 hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4); 1012 hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32); 1013 hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1); 1014 hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); 1015 hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1); 1016 hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1); 1017 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0xc); 1018 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x60); 1019 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x3); 1020 hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1); 1021 hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1); 1022 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0); 1023 hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); 1024 hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); 1025 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); 1026 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe); 1027 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe); 1028 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe); 1029 
hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30); 1030 for (i = 0; i < hisi_hba->queue_count; i++) 1031 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); 1032 1033 hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1); 1034 hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); 1035 1036 for (i = 0; i < hisi_hba->n_phy; i++) { 1037 hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855); 1038 hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908); 1039 hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); 1040 hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0); 1041 hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2); 1042 hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x8); 1043 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); 1044 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); 1045 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff); 1046 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); 1047 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); 1048 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); 1049 hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc); 1050 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); 1051 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); 1052 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); 1053 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); 1054 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); 1055 hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0); 1056 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0); 1057 if (hisi_hba->refclk_frequency_mhz == 66) 1058 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694); 1059 /* else, do nothing -> leave it how you found it */ 1060 } 1061 1062 for (i = 0; i < hisi_hba->queue_count; i++) { 1063 /* Delivery queue */ 1064 hisi_sas_write32(hisi_hba, 1065 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), 1066 upper_32_bits(hisi_hba->cmd_hdr_dma[i])); 1067 1068 
hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), 1069 lower_32_bits(hisi_hba->cmd_hdr_dma[i])); 1070 1071 hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), 1072 HISI_SAS_QUEUE_SLOTS); 1073 1074 /* Completion queue */ 1075 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), 1076 upper_32_bits(hisi_hba->complete_hdr_dma[i])); 1077 1078 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), 1079 lower_32_bits(hisi_hba->complete_hdr_dma[i])); 1080 1081 hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), 1082 HISI_SAS_QUEUE_SLOTS); 1083 } 1084 1085 /* itct */ 1086 hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, 1087 lower_32_bits(hisi_hba->itct_dma)); 1088 1089 hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, 1090 upper_32_bits(hisi_hba->itct_dma)); 1091 1092 /* iost */ 1093 hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, 1094 lower_32_bits(hisi_hba->iost_dma)); 1095 1096 hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, 1097 upper_32_bits(hisi_hba->iost_dma)); 1098 1099 /* breakpoint */ 1100 hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO, 1101 lower_32_bits(hisi_hba->breakpoint_dma)); 1102 1103 hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI, 1104 upper_32_bits(hisi_hba->breakpoint_dma)); 1105 1106 /* SATA broken msg */ 1107 hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO, 1108 lower_32_bits(hisi_hba->sata_breakpoint_dma)); 1109 1110 hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI, 1111 upper_32_bits(hisi_hba->sata_breakpoint_dma)); 1112 1113 /* SATA initial fis */ 1114 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO, 1115 lower_32_bits(hisi_hba->initial_fis_dma)); 1116 1117 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, 1118 upper_32_bits(hisi_hba->initial_fis_dma)); 1119 } 1120 1121 static void link_timeout_enable_link(unsigned long data) 1122 { 1123 struct hisi_hba *hisi_hba = (struct hisi_hba *)data; 1124 int i, reg_val; 1125 1126 for (i = 0; i < hisi_hba->n_phy; i++) { 1127 if 
(hisi_hba->reject_stp_links_msk & BIT(i)) 1128 continue; 1129 1130 reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL); 1131 if (!(reg_val & BIT(0))) { 1132 hisi_sas_phy_write32(hisi_hba, i, 1133 CON_CONTROL, 0x7); 1134 break; 1135 } 1136 } 1137 1138 hisi_hba->timer.function = link_timeout_disable_link; 1139 mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900)); 1140 } 1141 1142 static void link_timeout_disable_link(unsigned long data) 1143 { 1144 struct hisi_hba *hisi_hba = (struct hisi_hba *)data; 1145 int i, reg_val; 1146 1147 reg_val = hisi_sas_read32(hisi_hba, PHY_STATE); 1148 for (i = 0; i < hisi_hba->n_phy && reg_val; i++) { 1149 if (hisi_hba->reject_stp_links_msk & BIT(i)) 1150 continue; 1151 1152 if (reg_val & BIT(i)) { 1153 hisi_sas_phy_write32(hisi_hba, i, 1154 CON_CONTROL, 0x6); 1155 break; 1156 } 1157 } 1158 1159 hisi_hba->timer.function = link_timeout_enable_link; 1160 mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100)); 1161 } 1162 1163 static void set_link_timer_quirk(struct hisi_hba *hisi_hba) 1164 { 1165 hisi_hba->timer.data = (unsigned long)hisi_hba; 1166 hisi_hba->timer.function = link_timeout_disable_link; 1167 hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000); 1168 add_timer(&hisi_hba->timer); 1169 } 1170 1171 static int hw_init_v2_hw(struct hisi_hba *hisi_hba) 1172 { 1173 struct device *dev = &hisi_hba->pdev->dev; 1174 int rc; 1175 1176 rc = reset_hw_v2_hw(hisi_hba); 1177 if (rc) { 1178 dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc); 1179 return rc; 1180 } 1181 1182 msleep(100); 1183 init_reg_v2_hw(hisi_hba); 1184 1185 return 0; 1186 } 1187 1188 static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1189 { 1190 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 1191 1192 cfg |= PHY_CFG_ENA_MSK; 1193 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 1194 } 1195 1196 static bool is_sata_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1197 { 1198 u32 context; 1199 1200 context = 
hisi_sas_read32(hisi_hba, PHY_CONTEXT); 1201 if (context & (1 << phy_no)) 1202 return true; 1203 1204 return false; 1205 } 1206 1207 static bool tx_fifo_is_empty_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1208 { 1209 u32 dfx_val; 1210 1211 dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1); 1212 1213 if (dfx_val & BIT(16)) 1214 return false; 1215 1216 return true; 1217 } 1218 1219 static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1220 { 1221 int i, max_loop = 1000; 1222 struct device *dev = &hisi_hba->pdev->dev; 1223 u32 status, axi_status, dfx_val, dfx_tx_val; 1224 1225 for (i = 0; i < max_loop; i++) { 1226 status = hisi_sas_read32_relaxed(hisi_hba, 1227 AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN); 1228 1229 axi_status = hisi_sas_read32(hisi_hba, AXI_CFG); 1230 dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1); 1231 dfx_tx_val = hisi_sas_phy_read32(hisi_hba, 1232 phy_no, DMA_TX_FIFO_DFX0); 1233 1234 if ((status == 0x3) && (axi_status == 0x0) && 1235 (dfx_val & BIT(20)) && (dfx_tx_val & BIT(10))) 1236 return true; 1237 udelay(10); 1238 } 1239 dev_err(dev, "bus is not idle phy%d, axi150:0x%x axi100:0x%x port204:0x%x port240:0x%x\n", 1240 phy_no, status, axi_status, 1241 dfx_val, dfx_tx_val); 1242 return false; 1243 } 1244 1245 static bool wait_io_done_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1246 { 1247 int i, max_loop = 1000; 1248 struct device *dev = &hisi_hba->pdev->dev; 1249 u32 status, tx_dfx0; 1250 1251 for (i = 0; i < max_loop; i++) { 1252 status = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2); 1253 status = (status & 0x3fc0) >> 6; 1254 1255 if (status != 0x1) 1256 return true; 1257 1258 tx_dfx0 = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX0); 1259 if ((tx_dfx0 & 0x1ff) == 0x2) 1260 return true; 1261 udelay(10); 1262 } 1263 dev_err(dev, "IO not done phy%d, port264:0x%x port200:0x%x\n", 1264 phy_no, status, tx_dfx0); 1265 return false; 1266 } 1267 1268 static bool allowed_disable_phy_v2_hw(struct 
hisi_hba *hisi_hba, int phy_no) 1269 { 1270 if (tx_fifo_is_empty_v2_hw(hisi_hba, phy_no)) 1271 return true; 1272 1273 if (!axi_bus_is_idle_v2_hw(hisi_hba, phy_no)) 1274 return false; 1275 1276 if (!wait_io_done_v2_hw(hisi_hba, phy_no)) 1277 return false; 1278 1279 return true; 1280 } 1281 1282 1283 static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1284 { 1285 u32 cfg, axi_val, dfx0_val, txid_auto; 1286 struct device *dev = &hisi_hba->pdev->dev; 1287 1288 /* Close axi bus. */ 1289 axi_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + 1290 AM_CTRL_GLOBAL); 1291 axi_val |= 0x1; 1292 hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + 1293 AM_CTRL_GLOBAL, axi_val); 1294 1295 if (is_sata_phy_v2_hw(hisi_hba, phy_no)) { 1296 if (allowed_disable_phy_v2_hw(hisi_hba, phy_no)) 1297 goto do_disable; 1298 1299 /* Reset host controller. */ 1300 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 1301 return; 1302 } 1303 1304 dfx0_val = hisi_sas_phy_read32(hisi_hba, phy_no, PORT_DFX0); 1305 dfx0_val = (dfx0_val & 0x1fc0) >> 6; 1306 if (dfx0_val != 0x4) 1307 goto do_disable; 1308 1309 if (!tx_fifo_is_empty_v2_hw(hisi_hba, phy_no)) { 1310 dev_warn(dev, "phy%d, wait tx fifo need send break\n", 1311 phy_no); 1312 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, 1313 TXID_AUTO); 1314 txid_auto |= TXID_AUTO_CTB_MSK; 1315 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, 1316 txid_auto); 1317 } 1318 1319 do_disable: 1320 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 1321 cfg &= ~PHY_CFG_ENA_MSK; 1322 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 1323 1324 /* Open axi bus. 
*/ 1325 axi_val &= ~0x1; 1326 hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + 1327 AM_CTRL_GLOBAL, axi_val); 1328 } 1329 1330 static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1331 { 1332 config_id_frame_v2_hw(hisi_hba, phy_no); 1333 config_phy_opt_mode_v2_hw(hisi_hba, phy_no); 1334 enable_phy_v2_hw(hisi_hba, phy_no); 1335 } 1336 1337 static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1338 { 1339 disable_phy_v2_hw(hisi_hba, phy_no); 1340 } 1341 1342 static void stop_phys_v2_hw(struct hisi_hba *hisi_hba) 1343 { 1344 int i; 1345 1346 for (i = 0; i < hisi_hba->n_phy; i++) 1347 stop_phy_v2_hw(hisi_hba, i); 1348 } 1349 1350 static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1351 { 1352 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1353 u32 txid_auto; 1354 1355 stop_phy_v2_hw(hisi_hba, phy_no); 1356 if (phy->identify.device_type == SAS_END_DEVICE) { 1357 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); 1358 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, 1359 txid_auto | TX_HARDRST_MSK); 1360 } 1361 msleep(100); 1362 start_phy_v2_hw(hisi_hba, phy_no); 1363 } 1364 1365 static void start_phys_v2_hw(struct hisi_hba *hisi_hba) 1366 { 1367 int i; 1368 1369 for (i = 0; i < hisi_hba->n_phy; i++) 1370 start_phy_v2_hw(hisi_hba, i); 1371 } 1372 1373 static void phys_init_v2_hw(struct hisi_hba *hisi_hba) 1374 { 1375 start_phys_v2_hw(hisi_hba); 1376 } 1377 1378 static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no) 1379 { 1380 u32 sl_control; 1381 1382 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); 1383 sl_control |= SL_CONTROL_NOTIFY_EN_MSK; 1384 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); 1385 msleep(1); 1386 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); 1387 sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; 1388 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); 1389 } 1390 1391 static enum sas_linkrate phy_get_max_linkrate_v2_hw(void) 
1392 { 1393 return SAS_LINK_RATE_12_0_GBPS; 1394 } 1395 1396 static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no, 1397 struct sas_phy_linkrates *r) 1398 { 1399 u32 prog_phy_link_rate = 1400 hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); 1401 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1402 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1403 int i; 1404 enum sas_linkrate min, max; 1405 u32 rate_mask = 0; 1406 1407 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1408 max = sas_phy->phy->maximum_linkrate; 1409 min = r->minimum_linkrate; 1410 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1411 max = r->maximum_linkrate; 1412 min = sas_phy->phy->minimum_linkrate; 1413 } else 1414 return; 1415 1416 sas_phy->phy->maximum_linkrate = max; 1417 sas_phy->phy->minimum_linkrate = min; 1418 1419 min -= SAS_LINK_RATE_1_5_GBPS; 1420 max -= SAS_LINK_RATE_1_5_GBPS; 1421 1422 for (i = 0; i <= max; i++) 1423 rate_mask |= 1 << (i * 2); 1424 1425 prog_phy_link_rate &= ~0xff; 1426 prog_phy_link_rate |= rate_mask; 1427 1428 hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, 1429 prog_phy_link_rate); 1430 1431 phy_hard_reset_v2_hw(hisi_hba, phy_no); 1432 } 1433 1434 static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) 1435 { 1436 int i, bitmap = 0; 1437 u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); 1438 u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); 1439 1440 for (i = 0; i < (hisi_hba->n_phy < 9 ? 
hisi_hba->n_phy : 8); i++) 1441 if (phy_state & 1 << i) 1442 if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) 1443 bitmap |= 1 << i; 1444 1445 if (hisi_hba->n_phy == 9) { 1446 u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); 1447 1448 if (phy_state & 1 << 8) 1449 if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> 1450 PORT_STATE_PHY8_PORT_NUM_OFF) == port_id) 1451 bitmap |= 1 << 9; 1452 } 1453 1454 return bitmap; 1455 } 1456 1457 /** 1458 * This function allocates across all queues to load balance. 1459 * Slots are allocated from queues in a round-robin fashion. 1460 * 1461 * The callpath to this function and upto writing the write 1462 * queue pointer should be safe from interruption. 1463 */ 1464 static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id, 1465 int *q, int *s) 1466 { 1467 struct device *dev = &hisi_hba->pdev->dev; 1468 struct hisi_sas_dq *dq; 1469 u32 r, w; 1470 int queue = dev_id % hisi_hba->queue_count; 1471 1472 dq = &hisi_hba->dq[queue]; 1473 w = dq->wr_point; 1474 r = hisi_sas_read32_relaxed(hisi_hba, 1475 DLVRY_Q_0_RD_PTR + (queue * 0x14)); 1476 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { 1477 dev_warn(dev, "full queue=%d r=%d w=%d\n\n", 1478 queue, r, w); 1479 return -EAGAIN; 1480 } 1481 1482 *q = queue; 1483 *s = w; 1484 return 0; 1485 } 1486 1487 static void start_delivery_v2_hw(struct hisi_hba *hisi_hba) 1488 { 1489 int dlvry_queue = hisi_hba->slot_prep->dlvry_queue; 1490 int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot; 1491 struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue]; 1492 1493 dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS; 1494 hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), 1495 dq->wr_point); 1496 } 1497 1498 static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, 1499 struct hisi_sas_slot *slot, 1500 struct hisi_sas_cmd_hdr *hdr, 1501 struct scatterlist *scatter, 1502 int n_elem) 1503 { 1504 struct device *dev = &hisi_hba->pdev->dev; 1505 struct scatterlist *sg; 
1506 int i; 1507 1508 if (n_elem > HISI_SAS_SGE_PAGE_CNT) { 1509 dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", 1510 n_elem); 1511 return -EINVAL; 1512 } 1513 1514 slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC, 1515 &slot->sge_page_dma); 1516 if (!slot->sge_page) 1517 return -ENOMEM; 1518 1519 for_each_sg(scatter, sg, n_elem, i) { 1520 struct hisi_sas_sge *entry = &slot->sge_page->sge[i]; 1521 1522 entry->addr = cpu_to_le64(sg_dma_address(sg)); 1523 entry->page_ctrl_0 = entry->page_ctrl_1 = 0; 1524 entry->data_len = cpu_to_le32(sg_dma_len(sg)); 1525 entry->data_off = 0; 1526 } 1527 1528 hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma); 1529 1530 hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); 1531 1532 return 0; 1533 } 1534 1535 static int prep_smp_v2_hw(struct hisi_hba *hisi_hba, 1536 struct hisi_sas_slot *slot) 1537 { 1538 struct sas_task *task = slot->task; 1539 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 1540 struct domain_device *device = task->dev; 1541 struct device *dev = &hisi_hba->pdev->dev; 1542 struct hisi_sas_port *port = slot->port; 1543 struct scatterlist *sg_req, *sg_resp; 1544 struct hisi_sas_device *sas_dev = device->lldd_dev; 1545 dma_addr_t req_dma_addr; 1546 unsigned int req_len, resp_len; 1547 int elem, rc; 1548 1549 /* 1550 * DMA-map SMP request, response buffers 1551 */ 1552 /* req */ 1553 sg_req = &task->smp_task.smp_req; 1554 elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE); 1555 if (!elem) 1556 return -ENOMEM; 1557 req_len = sg_dma_len(sg_req); 1558 req_dma_addr = sg_dma_address(sg_req); 1559 1560 /* resp */ 1561 sg_resp = &task->smp_task.smp_resp; 1562 elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE); 1563 if (!elem) { 1564 rc = -ENOMEM; 1565 goto err_out_req; 1566 } 1567 resp_len = sg_dma_len(sg_resp); 1568 if ((req_len & 0x3) || (resp_len & 0x3)) { 1569 rc = -EINVAL; 1570 goto err_out_resp; 1571 } 1572 1573 /* create header */ 1574 /* dw0 */ 1575 hdr->dw0 = 
cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | 1576 (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ 1577 (2 << CMD_HDR_CMD_OFF)); /* smp */ 1578 1579 /* map itct entry */ 1580 hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) | 1581 (1 << CMD_HDR_FRAME_TYPE_OFF) | 1582 (DIR_NO_DATA << CMD_HDR_DIR_OFF)); 1583 1584 /* dw2 */ 1585 hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) | 1586 (HISI_SAS_MAX_SMP_RESP_SZ / 4 << 1587 CMD_HDR_MRFL_OFF)); 1588 1589 hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); 1590 1591 hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); 1592 hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); 1593 1594 return 0; 1595 1596 err_out_resp: 1597 dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1, 1598 DMA_FROM_DEVICE); 1599 err_out_req: 1600 dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1, 1601 DMA_TO_DEVICE); 1602 return rc; 1603 } 1604 1605 static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, 1606 struct hisi_sas_slot *slot, int is_tmf, 1607 struct hisi_sas_tmf_task *tmf) 1608 { 1609 struct sas_task *task = slot->task; 1610 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 1611 struct domain_device *device = task->dev; 1612 struct hisi_sas_device *sas_dev = device->lldd_dev; 1613 struct hisi_sas_port *port = slot->port; 1614 struct sas_ssp_task *ssp_task = &task->ssp_task; 1615 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 1616 int has_data = 0, rc, priority = is_tmf; 1617 u8 *buf_cmd; 1618 u32 dw1 = 0, dw2 = 0; 1619 1620 hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | 1621 (2 << CMD_HDR_TLR_CTRL_OFF) | 1622 (port->id << CMD_HDR_PORT_OFF) | 1623 (priority << CMD_HDR_PRIORITY_OFF) | 1624 (1 << CMD_HDR_CMD_OFF)); /* ssp */ 1625 1626 dw1 = 1 << CMD_HDR_VDTL_OFF; 1627 if (is_tmf) { 1628 dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; 1629 dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; 1630 } else { 1631 dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; 1632 switch (scsi_cmnd->sc_data_direction) { 1633 case DMA_TO_DEVICE: 1634 
has_data = 1; 1635 dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; 1636 break; 1637 case DMA_FROM_DEVICE: 1638 has_data = 1; 1639 dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; 1640 break; 1641 default: 1642 dw1 &= ~CMD_HDR_DIR_MSK; 1643 } 1644 } 1645 1646 /* map itct entry */ 1647 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; 1648 hdr->dw1 = cpu_to_le32(dw1); 1649 1650 dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) 1651 + 3) / 4) << CMD_HDR_CFL_OFF) | 1652 ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | 1653 (2 << CMD_HDR_SG_MOD_OFF); 1654 hdr->dw2 = cpu_to_le32(dw2); 1655 1656 hdr->transfer_tags = cpu_to_le32(slot->idx); 1657 1658 if (has_data) { 1659 rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter, 1660 slot->n_elem); 1661 if (rc) 1662 return rc; 1663 } 1664 1665 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); 1666 hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma); 1667 hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma); 1668 1669 buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr); 1670 1671 memcpy(buf_cmd, &task->ssp_task.LUN, 8); 1672 if (!is_tmf) { 1673 buf_cmd[9] = task->ssp_task.task_attr | 1674 (task->ssp_task.task_prio << 3); 1675 memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, 1676 task->ssp_task.cmd->cmd_len); 1677 } else { 1678 buf_cmd[10] = tmf->tmf; 1679 switch (tmf->tmf) { 1680 case TMF_ABORT_TASK: 1681 case TMF_QUERY_TASK: 1682 buf_cmd[12] = 1683 (tmf->tag_of_task_to_be_managed >> 8) & 0xff; 1684 buf_cmd[13] = 1685 tmf->tag_of_task_to_be_managed & 0xff; 1686 break; 1687 default: 1688 break; 1689 } 1690 } 1691 1692 return 0; 1693 } 1694 1695 static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task, 1696 struct hisi_sas_slot *slot) 1697 { 1698 struct task_status_struct *ts = &task->task_status; 1699 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 1700 struct dev_to_host_fis *d2h = slot->status_buffer + 1701 sizeof(struct hisi_sas_err_record); 1702 
1703 resp->frame_len = sizeof(struct dev_to_host_fis); 1704 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 1705 1706 ts->buf_valid_size = sizeof(*resp); 1707 } 1708 1709 #define TRANS_TX_ERR 0 1710 #define TRANS_RX_ERR 1 1711 #define DMA_TX_ERR 2 1712 #define SIPC_RX_ERR 3 1713 #define DMA_RX_ERR 4 1714 1715 #define DMA_TX_ERR_OFF 0 1716 #define DMA_TX_ERR_MSK (0xffff << DMA_TX_ERR_OFF) 1717 #define SIPC_RX_ERR_OFF 16 1718 #define SIPC_RX_ERR_MSK (0xffff << SIPC_RX_ERR_OFF) 1719 1720 static int parse_trans_tx_err_code_v2_hw(u32 err_msk) 1721 { 1722 const u8 trans_tx_err_code_prio[] = { 1723 TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS, 1724 TRANS_TX_ERR_PHY_NOT_ENABLE, 1725 TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, 1726 TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, 1727 TRANS_TX_OPEN_CNX_ERR_BY_OTHER, 1728 RESERVED0, 1729 TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, 1730 TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, 1731 TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, 1732 TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, 1733 TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, 1734 TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, 1735 TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, 1736 TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, 1737 TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, 1738 TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, 1739 TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, 1740 TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, 1741 TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, 1742 TRANS_TX_ERR_WITH_CLOSE_COMINIT, 1743 TRANS_TX_ERR_WITH_BREAK_TIMEOUT, 1744 TRANS_TX_ERR_WITH_BREAK_REQUEST, 1745 TRANS_TX_ERR_WITH_BREAK_RECEVIED, 1746 TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, 1747 TRANS_TX_ERR_WITH_CLOSE_NORMAL, 1748 TRANS_TX_ERR_WITH_NAK_RECEVIED, 1749 TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, 1750 TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, 1751 TRANS_TX_ERR_WITH_IPTT_CONFLICT, 1752 TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, 1753 TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, 1754 }; 1755 int index, i; 1756 1757 for (i = 0; i < ARRAY_SIZE(trans_tx_err_code_prio); i++) { 1758 index = 
trans_tx_err_code_prio[i] - TRANS_TX_FAIL_BASE; 1759 if (err_msk & (1 << index)) 1760 return trans_tx_err_code_prio[i]; 1761 } 1762 return -1; 1763 } 1764 1765 static int parse_trans_rx_err_code_v2_hw(u32 err_msk) 1766 { 1767 const u8 trans_rx_err_code_prio[] = { 1768 TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR, 1769 TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, 1770 TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, 1771 TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, 1772 TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, 1773 TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, 1774 TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, 1775 TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, 1776 TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, 1777 TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, 1778 TRANS_RX_ERR_WITH_CLOSE_COMINIT, 1779 TRANS_RX_ERR_WITH_BREAK_TIMEOUT, 1780 TRANS_RX_ERR_WITH_BREAK_REQUEST, 1781 TRANS_RX_ERR_WITH_BREAK_RECEVIED, 1782 RESERVED1, 1783 TRANS_RX_ERR_WITH_CLOSE_NORMAL, 1784 TRANS_RX_ERR_WITH_DATA_LEN0, 1785 TRANS_RX_ERR_WITH_BAD_HASH, 1786 TRANS_RX_XRDY_WLEN_ZERO_ERR, 1787 TRANS_RX_SSP_FRM_LEN_ERR, 1788 RESERVED2, 1789 RESERVED3, 1790 RESERVED4, 1791 RESERVED5, 1792 TRANS_RX_ERR_WITH_BAD_FRM_TYPE, 1793 TRANS_RX_SMP_FRM_LEN_ERR, 1794 TRANS_RX_SMP_RESP_TIMEOUT_ERR, 1795 RESERVED6, 1796 RESERVED7, 1797 RESERVED8, 1798 RESERVED9, 1799 TRANS_RX_R_ERR, 1800 }; 1801 int index, i; 1802 1803 for (i = 0; i < ARRAY_SIZE(trans_rx_err_code_prio); i++) { 1804 index = trans_rx_err_code_prio[i] - TRANS_RX_FAIL_BASE; 1805 if (err_msk & (1 << index)) 1806 return trans_rx_err_code_prio[i]; 1807 } 1808 return -1; 1809 } 1810 1811 static int parse_dma_tx_err_code_v2_hw(u32 err_msk) 1812 { 1813 const u8 dma_tx_err_code_prio[] = { 1814 DMA_TX_UNEXP_XFER_ERR, 1815 DMA_TX_UNEXP_RETRANS_ERR, 1816 DMA_TX_XFER_LEN_OVERFLOW, 1817 DMA_TX_XFER_OFFSET_ERR, 1818 DMA_TX_RAM_ECC_ERR, 1819 DMA_TX_DIF_LEN_ALIGN_ERR, 1820 DMA_TX_DIF_CRC_ERR, 1821 DMA_TX_DIF_APP_ERR, 1822 DMA_TX_DIF_RPP_ERR, 1823 DMA_TX_DATA_SGL_OVERFLOW, 1824 DMA_TX_DIF_SGL_OVERFLOW, 1825 }; 1826 int index, i; 1827 1828 for (i 
= 0; i < ARRAY_SIZE(dma_tx_err_code_prio); i++) { 1829 index = dma_tx_err_code_prio[i] - DMA_TX_ERR_BASE; 1830 err_msk = err_msk & DMA_TX_ERR_MSK; 1831 if (err_msk & (1 << index)) 1832 return dma_tx_err_code_prio[i]; 1833 } 1834 return -1; 1835 } 1836 1837 static int parse_sipc_rx_err_code_v2_hw(u32 err_msk) 1838 { 1839 const u8 sipc_rx_err_code_prio[] = { 1840 SIPC_RX_FIS_STATUS_ERR_BIT_VLD, 1841 SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, 1842 SIPC_RX_FIS_STATUS_BSY_BIT_ERR, 1843 SIPC_RX_WRSETUP_LEN_ODD_ERR, 1844 SIPC_RX_WRSETUP_LEN_ZERO_ERR, 1845 SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, 1846 SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, 1847 SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, 1848 SIPC_RX_SATA_UNEXP_FIS_ERR, 1849 SIPC_RX_WRSETUP_ESTATUS_ERR, 1850 SIPC_RX_DATA_UNDERFLOW_ERR, 1851 }; 1852 int index, i; 1853 1854 for (i = 0; i < ARRAY_SIZE(sipc_rx_err_code_prio); i++) { 1855 index = sipc_rx_err_code_prio[i] - SIPC_RX_ERR_BASE; 1856 err_msk = err_msk & SIPC_RX_ERR_MSK; 1857 if (err_msk & (1 << (index + 0x10))) 1858 return sipc_rx_err_code_prio[i]; 1859 } 1860 return -1; 1861 } 1862 1863 static int parse_dma_rx_err_code_v2_hw(u32 err_msk) 1864 { 1865 const u8 dma_rx_err_code_prio[] = { 1866 DMA_RX_UNKNOWN_FRM_ERR, 1867 DMA_RX_DATA_LEN_OVERFLOW, 1868 DMA_RX_DATA_LEN_UNDERFLOW, 1869 DMA_RX_DATA_OFFSET_ERR, 1870 RESERVED10, 1871 DMA_RX_SATA_FRAME_TYPE_ERR, 1872 DMA_RX_RESP_BUF_OVERFLOW, 1873 DMA_RX_UNEXP_RETRANS_RESP_ERR, 1874 DMA_RX_UNEXP_NORM_RESP_ERR, 1875 DMA_RX_UNEXP_RDFRAME_ERR, 1876 DMA_RX_PIO_DATA_LEN_ERR, 1877 DMA_RX_RDSETUP_STATUS_ERR, 1878 DMA_RX_RDSETUP_STATUS_DRQ_ERR, 1879 DMA_RX_RDSETUP_STATUS_BSY_ERR, 1880 DMA_RX_RDSETUP_LEN_ODD_ERR, 1881 DMA_RX_RDSETUP_LEN_ZERO_ERR, 1882 DMA_RX_RDSETUP_LEN_OVER_ERR, 1883 DMA_RX_RDSETUP_OFFSET_ERR, 1884 DMA_RX_RDSETUP_ACTIVE_ERR, 1885 DMA_RX_RDSETUP_ESTATUS_ERR, 1886 DMA_RX_RAM_ECC_ERR, 1887 DMA_RX_DIF_CRC_ERR, 1888 DMA_RX_DIF_APP_ERR, 1889 DMA_RX_DIF_RPP_ERR, 1890 DMA_RX_DATA_SGL_OVERFLOW, 1891 DMA_RX_DIF_SGL_OVERFLOW, 1892 }; 1893 int index, i; 
1894 1895 for (i = 0; i < ARRAY_SIZE(dma_rx_err_code_prio); i++) { 1896 index = dma_rx_err_code_prio[i] - DMA_RX_ERR_BASE; 1897 if (err_msk & (1 << index)) 1898 return dma_rx_err_code_prio[i]; 1899 } 1900 return -1; 1901 } 1902 1903 /* by default, task resp is complete */ 1904 static void slot_err_v2_hw(struct hisi_hba *hisi_hba, 1905 struct sas_task *task, 1906 struct hisi_sas_slot *slot, 1907 int err_phase) 1908 { 1909 struct task_status_struct *ts = &task->task_status; 1910 struct hisi_sas_err_record_v2 *err_record = slot->status_buffer; 1911 u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type); 1912 u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type); 1913 u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type); 1914 u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type); 1915 u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type); 1916 int error = -1; 1917 1918 if (err_phase == 1) { 1919 /* error in TX phase, the priority of error is: DW2 > DW0 */ 1920 error = parse_dma_tx_err_code_v2_hw(dma_tx_err_type); 1921 if (error == -1) 1922 error = parse_trans_tx_err_code_v2_hw( 1923 trans_tx_fail_type); 1924 } else if (err_phase == 2) { 1925 /* error in RX phase, the priority is: DW1 > DW3 > DW2 */ 1926 error = parse_trans_rx_err_code_v2_hw( 1927 trans_rx_fail_type); 1928 if (error == -1) { 1929 error = parse_dma_rx_err_code_v2_hw( 1930 dma_rx_err_type); 1931 if (error == -1) 1932 error = parse_sipc_rx_err_code_v2_hw( 1933 sipc_rx_err_type); 1934 } 1935 } 1936 1937 switch (task->task_proto) { 1938 case SAS_PROTOCOL_SSP: 1939 { 1940 switch (error) { 1941 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: 1942 { 1943 ts->stat = SAS_OPEN_REJECT; 1944 ts->open_rej_reason = SAS_OREJ_NO_DEST; 1945 break; 1946 } 1947 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: 1948 { 1949 ts->stat = SAS_OPEN_REJECT; 1950 ts->open_rej_reason = SAS_OREJ_EPROTO; 1951 break; 1952 } 1953 case 
TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: 1954 { 1955 ts->stat = SAS_OPEN_REJECT; 1956 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 1957 break; 1958 } 1959 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: 1960 { 1961 ts->stat = SAS_OPEN_REJECT; 1962 ts->open_rej_reason = SAS_OREJ_BAD_DEST; 1963 break; 1964 } 1965 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: 1966 { 1967 ts->stat = SAS_OPEN_REJECT; 1968 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 1969 break; 1970 } 1971 case DMA_RX_UNEXP_NORM_RESP_ERR: 1972 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: 1973 case DMA_RX_RESP_BUF_OVERFLOW: 1974 { 1975 ts->stat = SAS_OPEN_REJECT; 1976 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 1977 break; 1978 } 1979 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: 1980 { 1981 /* not sure */ 1982 ts->stat = SAS_DEV_NO_RESPONSE; 1983 break; 1984 } 1985 case DMA_RX_DATA_LEN_OVERFLOW: 1986 { 1987 ts->stat = SAS_DATA_OVERRUN; 1988 ts->residual = 0; 1989 break; 1990 } 1991 case DMA_RX_DATA_LEN_UNDERFLOW: 1992 { 1993 ts->residual = dma_rx_err_type; 1994 ts->stat = SAS_DATA_UNDERRUN; 1995 break; 1996 } 1997 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: 1998 case TRANS_TX_ERR_PHY_NOT_ENABLE: 1999 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: 2000 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: 2001 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: 2002 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: 2003 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: 2004 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: 2005 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: 2006 case TRANS_TX_ERR_WITH_BREAK_REQUEST: 2007 case TRANS_TX_ERR_WITH_BREAK_RECEVIED: 2008 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: 2009 case TRANS_TX_ERR_WITH_CLOSE_NORMAL: 2010 case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE: 2011 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: 2012 case TRANS_TX_ERR_WITH_CLOSE_COMINIT: 2013 case TRANS_TX_ERR_WITH_NAK_RECEVIED: 2014 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: 2015 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: 2016 case TRANS_TX_ERR_WITH_IPTT_CONFLICT: 2017 case 
TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR: 2018 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: 2019 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: 2020 case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN: 2021 case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: 2022 case TRANS_RX_ERR_WITH_BREAK_REQUEST: 2023 case TRANS_RX_ERR_WITH_BREAK_RECEVIED: 2024 case TRANS_RX_ERR_WITH_CLOSE_NORMAL: 2025 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: 2026 case TRANS_RX_ERR_WITH_CLOSE_COMINIT: 2027 case TRANS_TX_ERR_FRAME_TXED: 2028 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: 2029 case TRANS_RX_ERR_WITH_DATA_LEN0: 2030 case TRANS_RX_ERR_WITH_BAD_HASH: 2031 case TRANS_RX_XRDY_WLEN_ZERO_ERR: 2032 case TRANS_RX_SSP_FRM_LEN_ERR: 2033 case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: 2034 case DMA_TX_DATA_SGL_OVERFLOW: 2035 case DMA_TX_UNEXP_XFER_ERR: 2036 case DMA_TX_UNEXP_RETRANS_ERR: 2037 case DMA_TX_XFER_LEN_OVERFLOW: 2038 case DMA_TX_XFER_OFFSET_ERR: 2039 case SIPC_RX_DATA_UNDERFLOW_ERR: 2040 case DMA_RX_DATA_SGL_OVERFLOW: 2041 case DMA_RX_DATA_OFFSET_ERR: 2042 case DMA_RX_RDSETUP_LEN_ODD_ERR: 2043 case DMA_RX_RDSETUP_LEN_ZERO_ERR: 2044 case DMA_RX_RDSETUP_LEN_OVER_ERR: 2045 case DMA_RX_SATA_FRAME_TYPE_ERR: 2046 case DMA_RX_UNKNOWN_FRM_ERR: 2047 { 2048 /* This will request a retry */ 2049 ts->stat = SAS_QUEUE_FULL; 2050 slot->abort = 1; 2051 break; 2052 } 2053 default: 2054 break; 2055 } 2056 } 2057 break; 2058 case SAS_PROTOCOL_SMP: 2059 ts->stat = SAM_STAT_CHECK_CONDITION; 2060 break; 2061 2062 case SAS_PROTOCOL_SATA: 2063 case SAS_PROTOCOL_STP: 2064 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 2065 { 2066 switch (error) { 2067 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: 2068 { 2069 ts->stat = SAS_OPEN_REJECT; 2070 ts->open_rej_reason = SAS_OREJ_NO_DEST; 2071 break; 2072 } 2073 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: 2074 { 2075 ts->resp = SAS_TASK_UNDELIVERED; 2076 ts->stat = SAS_DEV_NO_RESPONSE; 2077 break; 2078 } 2079 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: 2080 { 2081 ts->stat = SAS_OPEN_REJECT; 2082 ts->open_rej_reason 
= SAS_OREJ_EPROTO; 2083 break; 2084 } 2085 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: 2086 { 2087 ts->stat = SAS_OPEN_REJECT; 2088 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 2089 break; 2090 } 2091 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: 2092 { 2093 ts->stat = SAS_OPEN_REJECT; 2094 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 2095 break; 2096 } 2097 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: 2098 { 2099 ts->stat = SAS_OPEN_REJECT; 2100 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 2101 break; 2102 } 2103 case DMA_RX_RESP_BUF_OVERFLOW: 2104 case DMA_RX_UNEXP_NORM_RESP_ERR: 2105 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: 2106 { 2107 ts->stat = SAS_OPEN_REJECT; 2108 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 2109 break; 2110 } 2111 case DMA_RX_DATA_LEN_OVERFLOW: 2112 { 2113 ts->stat = SAS_DATA_OVERRUN; 2114 ts->residual = 0; 2115 break; 2116 } 2117 case DMA_RX_DATA_LEN_UNDERFLOW: 2118 { 2119 ts->residual = dma_rx_err_type; 2120 ts->stat = SAS_DATA_UNDERRUN; 2121 break; 2122 } 2123 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: 2124 case TRANS_TX_ERR_PHY_NOT_ENABLE: 2125 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: 2126 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: 2127 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: 2128 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: 2129 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: 2130 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: 2131 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: 2132 case TRANS_TX_ERR_WITH_BREAK_REQUEST: 2133 case TRANS_TX_ERR_WITH_BREAK_RECEVIED: 2134 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: 2135 case TRANS_TX_ERR_WITH_CLOSE_NORMAL: 2136 case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE: 2137 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: 2138 case TRANS_TX_ERR_WITH_CLOSE_COMINIT: 2139 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: 2140 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: 2141 case TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS: 2142 case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT: 2143 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: 2144 case 
TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: 2145 case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR: 2146 case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR: 2147 case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN: 2148 case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP: 2149 case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN: 2150 case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: 2151 case TRANS_RX_ERR_WITH_BREAK_REQUEST: 2152 case TRANS_RX_ERR_WITH_BREAK_RECEVIED: 2153 case TRANS_RX_ERR_WITH_CLOSE_NORMAL: 2154 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: 2155 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: 2156 case TRANS_RX_ERR_WITH_CLOSE_COMINIT: 2157 case TRANS_RX_ERR_WITH_DATA_LEN0: 2158 case TRANS_RX_ERR_WITH_BAD_HASH: 2159 case TRANS_RX_XRDY_WLEN_ZERO_ERR: 2160 case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: 2161 case DMA_TX_DATA_SGL_OVERFLOW: 2162 case DMA_TX_UNEXP_XFER_ERR: 2163 case DMA_TX_UNEXP_RETRANS_ERR: 2164 case DMA_TX_XFER_LEN_OVERFLOW: 2165 case DMA_TX_XFER_OFFSET_ERR: 2166 case SIPC_RX_FIS_STATUS_ERR_BIT_VLD: 2167 case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR: 2168 case SIPC_RX_FIS_STATUS_BSY_BIT_ERR: 2169 case SIPC_RX_WRSETUP_LEN_ODD_ERR: 2170 case SIPC_RX_WRSETUP_LEN_ZERO_ERR: 2171 case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR: 2172 case SIPC_RX_SATA_UNEXP_FIS_ERR: 2173 case DMA_RX_DATA_SGL_OVERFLOW: 2174 case DMA_RX_DATA_OFFSET_ERR: 2175 case DMA_RX_SATA_FRAME_TYPE_ERR: 2176 case DMA_RX_UNEXP_RDFRAME_ERR: 2177 case DMA_RX_PIO_DATA_LEN_ERR: 2178 case DMA_RX_RDSETUP_STATUS_ERR: 2179 case DMA_RX_RDSETUP_STATUS_DRQ_ERR: 2180 case DMA_RX_RDSETUP_STATUS_BSY_ERR: 2181 case DMA_RX_RDSETUP_LEN_ODD_ERR: 2182 case DMA_RX_RDSETUP_LEN_ZERO_ERR: 2183 case DMA_RX_RDSETUP_LEN_OVER_ERR: 2184 case DMA_RX_RDSETUP_OFFSET_ERR: 2185 case DMA_RX_RDSETUP_ACTIVE_ERR: 2186 case DMA_RX_RDSETUP_ESTATUS_ERR: 2187 case DMA_RX_UNKNOWN_FRM_ERR: 2188 case TRANS_RX_SSP_FRM_LEN_ERR: 2189 case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY: 2190 { 2191 slot->abort = 1; 2192 ts->stat = SAS_PHY_DOWN; 2193 break; 2194 } 2195 default: 2196 { 2197 ts->stat = SAS_PROTO_RESPONSE; 2198 
break; 2199 } 2200 } 2201 sata_done_v2_hw(hisi_hba, task, slot); 2202 } 2203 break; 2204 default: 2205 break; 2206 } 2207 } 2208 2209 static int 2210 slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) 2211 { 2212 struct sas_task *task = slot->task; 2213 struct hisi_sas_device *sas_dev; 2214 struct device *dev = &hisi_hba->pdev->dev; 2215 struct task_status_struct *ts; 2216 struct domain_device *device; 2217 enum exec_status sts; 2218 struct hisi_sas_complete_v2_hdr *complete_queue = 2219 hisi_hba->complete_hdr[slot->cmplt_queue]; 2220 struct hisi_sas_complete_v2_hdr *complete_hdr = 2221 &complete_queue[slot->cmplt_queue_slot]; 2222 unsigned long flags; 2223 int aborted; 2224 2225 if (unlikely(!task || !task->lldd_task || !task->dev)) 2226 return -EINVAL; 2227 2228 ts = &task->task_status; 2229 device = task->dev; 2230 sas_dev = device->lldd_dev; 2231 2232 spin_lock_irqsave(&task->task_state_lock, flags); 2233 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; 2234 task->task_state_flags &= 2235 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 2236 spin_unlock_irqrestore(&task->task_state_lock, flags); 2237 2238 memset(ts, 0, sizeof(*ts)); 2239 ts->resp = SAS_TASK_COMPLETE; 2240 2241 if (unlikely(aborted)) { 2242 ts->stat = SAS_ABORTED_TASK; 2243 hisi_sas_slot_task_free(hisi_hba, task, slot); 2244 return -1; 2245 } 2246 2247 if (unlikely(!sas_dev)) { 2248 dev_dbg(dev, "slot complete: port has no device\n"); 2249 ts->stat = SAS_PHY_DOWN; 2250 goto out; 2251 } 2252 2253 /* Use SAS+TMF status codes */ 2254 switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK) 2255 >> CMPLT_HDR_ABORT_STAT_OFF) { 2256 case STAT_IO_ABORTED: 2257 /* this io has been aborted by abort command */ 2258 ts->stat = SAS_ABORTED_TASK; 2259 goto out; 2260 case STAT_IO_COMPLETE: 2261 /* internal abort command complete */ 2262 ts->stat = TMF_RESP_FUNC_SUCC; 2263 del_timer(&slot->internal_abort_timer); 2264 goto out; 2265 case STAT_IO_NO_DEVICE: 2266 ts->stat = 
TMF_RESP_FUNC_COMPLETE;
		del_timer(&slot->internal_abort_timer);
		goto out;
	case STAT_IO_NOT_VALID:
		/* abort single io, controller don't find
		 * the io need to abort
		 */
		ts->stat = TMF_RESP_FUNC_FAILED;
		del_timer(&slot->internal_abort_timer);
		goto out;
	default:
		break;
	}

	/* Error record present and no response frame transferred: let
	 * slot_err_v2_hw() decode it; it may flag the slot for abort. */
	if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
		(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
		u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
				>> CMPLT_HDR_ERR_PHASE_OFF;

		/* Analyse error happens on which phase TX or RX */
		if (ERR_ON_TX_PHASE(err_phase))
			slot_err_v2_hw(hisi_hba, task, slot, 1);
		else if (ERR_ON_RX_PHASE(err_phase))
			slot_err_v2_hw(hisi_hba, task, slot, 2);

		if (unlikely(slot->abort))
			return ts->stat;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	{
		/* the SSP response IU follows the error record in the
		 * status buffer */
		struct ssp_response_iu *iu = slot->status_buffer +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP:
	{
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to;

		ts->stat = SAM_STAT_GOOD;
		to = kmap_atomic(sg_page(sg_resp));

		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		/* copy the SMP response out of the status buffer */
		memcpy(to + sg_resp->offset,
		       slot->status_buffer +
		       sizeof(struct hisi_sas_err_record),
		       sg_dma_len(sg_resp));
		kunmap_atomic(to);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		ts->stat = SAM_STAT_GOOD;
		sata_done_v2_hw(hisi_hba, task, slot);
		break;
	}
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_err(dev, "slot complete: port %d has removed\n",
			slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot);
	sts = ts->stat;

	if (task->task_done)
		task->task_done(task);

	return sts;
}

/*
 * get_ata_protocol - classify an ATA command opcode into the SATA
 * transfer protocol (FPDMA/PIO/DMA/non-data) used when building the
 * command header.  @direction disambiguates unknown opcodes.
 */
static u8 get_ata_protocol(u8 cmd, int direction)
{
	switch (cmd) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
	return SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
	return SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	return SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	return SATA_PROTOCOL_NONDATA;
	default:
		/* unknown opcode: fall back on the DMA direction */
		if (direction == DMA_NONE)
			return SATA_PROTOCOL_NONDATA;
		return
SATA_PROTOCOL_PIO; 2411 } 2412 } 2413 2414 static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag) 2415 { 2416 struct ata_queued_cmd *qc = task->uldd_task; 2417 2418 if (qc) { 2419 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || 2420 qc->tf.command == ATA_CMD_FPDMA_READ) { 2421 *tag = qc->tag; 2422 return 1; 2423 } 2424 } 2425 return 0; 2426 } 2427 2428 static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, 2429 struct hisi_sas_slot *slot) 2430 { 2431 struct sas_task *task = slot->task; 2432 struct domain_device *device = task->dev; 2433 struct domain_device *parent_dev = device->parent; 2434 struct hisi_sas_device *sas_dev = device->lldd_dev; 2435 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 2436 struct asd_sas_port *sas_port = device->port; 2437 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 2438 u8 *buf_cmd; 2439 int has_data = 0, rc = 0, hdr_tag = 0; 2440 u32 dw1 = 0, dw2 = 0; 2441 2442 /* create header */ 2443 /* dw0 */ 2444 hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); 2445 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) 2446 hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); 2447 else 2448 hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF); 2449 2450 /* dw1 */ 2451 switch (task->data_dir) { 2452 case DMA_TO_DEVICE: 2453 has_data = 1; 2454 dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; 2455 break; 2456 case DMA_FROM_DEVICE: 2457 has_data = 1; 2458 dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; 2459 break; 2460 default: 2461 dw1 &= ~CMD_HDR_DIR_MSK; 2462 } 2463 2464 if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) && 2465 (task->ata_task.fis.control & ATA_SRST)) 2466 dw1 |= 1 << CMD_HDR_RESET_OFF; 2467 2468 dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir)) 2469 << CMD_HDR_FRAME_TYPE_OFF; 2470 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; 2471 hdr->dw1 = cpu_to_le32(dw1); 2472 2473 /* dw2 */ 2474 if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) { 2475 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); 2476 
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
			2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}


	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	buf_cmd = slot->command_table;

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));

	return 0;
}

/*
 * Quirk timer callback for internal abort: if the link is stuck in a
 * hold state when the timer fires, transmit a BREAK on that phy to
 * kick the hardware out of the hold.
 */
static void hisi_sas_internal_abort_quirk_timeout(unsigned long data)
{
	struct hisi_sas_slot *slot = (struct hisi_sas_slot *)data;
	struct hisi_sas_port *port = slot->port;
	struct asd_sas_port *asd_sas_port;
	struct asd_sas_phy *sas_phy;

	if (!port)
		return;

	asd_sas_port = &port->sas_port;

	/* Kick the hardware - send break command */
	list_for_each_entry(sas_phy, &asd_sas_port->phy_list, port_phy_el) {
		struct hisi_sas_phy *phy = sas_phy->lldd_phy;
		struct hisi_hba *hisi_hba = phy->hisi_hba;
		int phy_no = sas_phy->id;
		u32 link_dfx2;

		link_dfx2 = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2);
		if ((link_dfx2 == LINK_DFX2_RCVR_HOLD_STS_MSK) ||
		    (link_dfx2 & LINK_DFX2_SEND_HOLD_STS_MSK)) {
			u32 txid_auto;

			txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no,
							TXID_AUTO);
			/* request BREAK transmission */
			txid_auto |= TXID_AUTO_CTB_MSK;
			hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
					     txid_auto);
			return;
		}
	}
}

static int prep_abort_v2_hw(struct
hisi_hba *hisi_hba,
			    struct hisi_sas_slot *slot,
			    int device_id, int abort_flag, int tag_to_abort)
{
	struct sas_task *task = slot->task;
	struct domain_device *dev = task->dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct hisi_sas_port *port = slot->port;
	struct timer_list *timer = &slot->internal_abort_timer;

	/* setup the quirk timer */
	setup_timer(timer, hisi_sas_internal_abort_quirk_timeout,
		    (unsigned long)slot);
	/* Set the timeout to 10ms less than internal abort timeout */
	/* NOTE(review): 100ms assumes a 110ms internal abort timeout -
	 * confirm against the firmware/abort path. */
	mod_timer(timer, jiffies + msecs_to_jiffies(100));

	/* dw0 */
	hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
			       (port->id << CMD_HDR_PORT_OFF) |
			       ((dev_is_sata(dev) ? 1:0) <<
				CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
			       (abort_flag << CMD_HDR_ABORT_FLAG_OFF));

	/* dw1 */
	hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF);

	/* dw7 */
	hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	return 0;
}

/*
 * phy_up_v2_hw - handle a phy-up (SL_PHY_ENABLE) channel interrupt:
 * read the negotiated port id and link rate, copy out the received
 * IDENTIFY address frame, and schedule the phy-up work.
 * Returns IRQ_HANDLED, or IRQ_NONE when the reported port id is
 * invalid.
 */
static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i, res = IRQ_HANDLED;
	u32 port_id, link_rate, hard_phy_linkrate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = &hisi_hba->pdev->dev;
	u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
	struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;

	/* mask further phy-enable interrupts while processing */
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);

	if (is_sata_phy_v2_hw(hisi_hba, phy_no))
		goto end;

	if (phy_no == 8) {
		/* phy 8 reports port/rate via PORT_STATE rather than the
		 * per-phy nibble registers used by phys 0-7 */
		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);

		port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
			  PORT_STATE_PHY8_PORT_NUM_OFF;
		link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
			    PORT_STATE_PHY8_CONN_RATE_OFF;
	} else {
		port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
		port_id = (port_id >> (4 * phy_no)) & 0xf;
		link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
		link_rate = (link_rate >> (phy_no * 4)) & 0xf;
	}

	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}

	/* copy out the received identify address frame, byte-swapping
	 * each dword */
	for (i = 0; i < 6; i++) {
		u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
					RX_IDAF_DWORD0 + (i * 4));
		frame_rcvd[i] = __swab32(idaf);
	}

	sas_phy->linkrate = link_rate;
	hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
						HARD_PHY_LINKRATE);
	phy->maximum_linkrate = hard_phy_linkrate & 0xf;
	phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;

	sas_phy->oob_mode = SAS_OOB_MODE;
	memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE);
	dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
	phy->port_id = port_id;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	phy->phy_type |= PORT_TYPE_SAS;
	phy->phy_attached = 1;
	phy->identify.device_type = id->dev_type;
	phy->frame_rcvd_size =	sizeof(struct sas_identify_frame);
	if (phy->identify.device_type == SAS_END_DEVICE)
		phy->identify.target_port_protocols =
			SAS_PROTOCOL_SSP;
	else if (phy->identify.device_type != SAS_PHY_UNUSED) {
		phy->identify.target_port_protocols =
			SAS_PROTOCOL_SMP;
		/* expander attached: arm the link timer quirk */
		if (!timer_pending(&hisi_hba->timer))
			set_link_timer_quirk(hisi_hba);
	}
	queue_work(hisi_hba->wq, &phy->phyup_ws);

end:
	/* ack the interrupt source and unmask phy-enable interrupts */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}

/*
 * check_any_wideports_v2_hw - true when any of the 9 phys is still
 * reported attached in PORT_STATE (bits 0-8).
 */
static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba)
{
	u32 port_state;

	port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
	if (port_state & 0x1ff)
		return true;
	return false;
}

/*
 * phy_down_v2_hw - handle a phy-not-ready (link down) channel
 * interrupt for @phy_no.  Always returns IRQ_HANDLED.
 */
static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	u32 phy_state, sl_ctrl, txid_auto;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct hisi_sas_port *port = phy->port;

	/* mask further not-ready interrupts while processing */
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);

	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
			     sl_ctrl & ~SL_CONTROL_CTA_MSK);
	/* last phy of the port gone and no wide port left anywhere:
	 * the link timer quirk is no longer needed */
	if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id))
		if (!check_any_wideports_v2_hw(hisi_hba) &&
		    timer_pending(&hisi_hba->timer))
			del_timer(&hisi_hba->timer);

	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
			     txid_auto | TXID_AUTO_CT3_MSK);

	/* ack the source and unmask not-ready interrupts again */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return IRQ_HANDLED;
}

/*
 * int_phy_updown_v2_hw - top-level interrupt handler for phy up/down
 * events; walks the per-channel pending bits and dispatches to
 * phy_up_v2_hw()/phy_down_v2_hw().
 */
static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;

	irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
		   >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
					CHL_INT0);

			switch (reg_value & (CHL_INT0_NOT_RDY_MSK |
					CHL_INT0_SL_PHY_ENABLE_MSK)) {

			case CHL_INT0_SL_PHY_ENABLE_MSK:
				/* phy up */
				if (phy_up_v2_hw(phy_no, hisi_hba) ==
				    IRQ_NONE)
					return IRQ_NONE;
				break;

			case CHL_INT0_NOT_RDY_MSK:
				/* phy down */
				if (phy_down_v2_hw(phy_no, hisi_hba) ==
				    IRQ_NONE)
					return IRQ_NONE;
				break;

			case (CHL_INT0_NOT_RDY_MSK |
					CHL_INT0_SL_PHY_ENABLE_MSK):
				/* both events pending: PHY_STATE decides
				 * the current link state */
				reg_value = hisi_sas_read32(hisi_hba,
						PHY_STATE);
				if (reg_value & BIT(phy_no)) {
					/* phy up */
					if (phy_up_v2_hw(phy_no, hisi_hba) ==
					    IRQ_NONE)
						return IRQ_NONE;
				} else {
					/* phy down */
					if (phy_down_v2_hw(phy_no, hisi_hba) ==
					    IRQ_NONE)
						return IRQ_NONE;
				}
				break;

			default:
				break;
			}

		}
		irq_msk >>= 1;
		phy_no++;
	}

	return IRQ_HANDLED;
}

/*
 * phy_bcast_v2_hw - handle a received BROADCAST primitive on @phy_no
 * and notify libsas when a broadcast-change was seen.
 */
static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	u32 bcast_status;

	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
	bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
	if (bcast_status & RX_BCAST_CHG_MSK)
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_RX_BCST_ACK_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
}

/*
 * int_chnl_int_v2_hw - per-channel interrupt handler: reads and acks
 * CHL_INT0/1/2 for each phy with a pending channel interrupt.
 */
static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	struct device *dev = &hisi_hba->pdev->dev;
	u32 ent_msk, ent_tmp, irq_msk;
	int phy_no = 0;

	/* mask channel interrupts while processing; restored below */
	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	ent_tmp = ent_msk;
	ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);

	irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >>
		   HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;

	while (irq_msk) {
		if (irq_msk & (1 << phy_no)) {
			u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
							     CHL_INT0);
			u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
							     CHL_INT1);
			u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
							     CHL_INT2);

			if (irq_value1) {
				/* DMAC ECC errors are unrecoverable */
				if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
						  CHL_INT1_DMAC_TX_ECC_ERR_MSK))
					panic("%s: DMAC RX/TX ecc bad error!\
					       (0x%x)",
					      dev_name(dev), irq_value1);

				hisi_sas_phy_write32(hisi_hba, phy_no,
						     CHL_INT1, irq_value1);
			}

			if (irq_value2)
				hisi_sas_phy_write32(hisi_hba, phy_no,
						     CHL_INT2, irq_value2);


			if (irq_value0) {
				if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
					phy_bcast_v2_hw(phy_no, hisi_hba);

				/* ack everything except the bits handled by
				 * the phy up/down and hotplug paths */
				hisi_sas_phy_write32(hisi_hba, phy_no,
						CHL_INT0, irq_value0
						& (~CHL_INT0_HOTPLUG_TOUT_MSK)
						& (~CHL_INT0_SL_PHY_ENABLE_MSK)
						& (~CHL_INT0_NOT_RDY_MSK));
			}
		}
		irq_msk &= ~(1 << phy_no);
		phy_no++;
	}

	/* restore the original interrupt mask */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);

	return IRQ_HANDLED;
}

/*
 * one_bit_ecc_error_process_v2_hw - report (correctable) single-bit
 * ECC errors flagged in @irq_value, logging the failing RAM address
 * for each affected memory.
 */
static void
one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
{
	struct device *dev = &hisi_hba->pdev->dev;
	u32 reg_val;

	if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
		dev_warn(dev, "hgc_dqe_acc1b_intr found: \
			 Ram address is 0x%08X\n",
			 (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >>
			 HGC_DQE_ECC_1B_ADDR_OFF);
	}

	if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
		dev_warn(dev, "hgc_iost_acc1b_intr found: \
			 Ram address is 0x%08X\n",
			 (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >>
			 HGC_IOST_ECC_1B_ADDR_OFF);
	}

	if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
		dev_warn(dev, "hgc_itct_acc1b_intr found: \
			 Ram address is 0x%08X\n",
			 (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >>
			 HGC_ITCT_ECC_1B_ADDR_OFF);
	}

	if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
		dev_warn(dev, "hgc_iostl_acc1b_intr found: \
			 memory address is 0x%08X\n",
			 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
			 HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
	}

	if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
		dev_warn(dev, "hgc_itctl_acc1b_intr found: \
			 memory address is 0x%08X\n",
			 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
			 HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
	}

	if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
		dev_warn(dev, "hgc_cqe_acc1b_intr found: \
			 Ram address is 0x%08X\n",
			 (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >>
			 HGC_CQE_ECC_1B_ADDR_OFF);
	}

	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
		dev_warn(dev, "rxm_mem0_acc1b_intr found: \
			 memory address is 0x%08X\n",
			 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
			 HGC_RXM_DFX_STATUS14_MEM0_OFF);
	}

	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
		dev_warn(dev, "rxm_mem1_acc1b_intr found: \
			 memory address is 0x%08X\n",
			 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
			 HGC_RXM_DFX_STATUS14_MEM1_OFF);
	}

	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
		dev_warn(dev, "rxm_mem2_acc1b_intr found: \
			 memory address is 0x%08X\n",
			 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
			 HGC_RXM_DFX_STATUS14_MEM2_OFF);
	}

	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
		dev_warn(dev, "rxm_mem3_acc1b_intr found: \
			 memory address is 0x%08X\n",
			 (reg_val &
HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
			 HGC_RXM_DFX_STATUS15_MEM3_OFF);
	}

}

/*
 * multi_bit_ecc_error_process_v2_hw - handle (uncorrectable) multi-bit
 * ECC errors flagged in @irq_value: log the failing address and queue
 * a controller reset for each affected memory.
 */
static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
		u32 irq_value)
{
	u32 reg_val;
	struct device *dev = &hisi_hba->pdev->dev;

	if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
		dev_warn(dev, "hgc_dqe_accbad_intr (0x%x) found: \
			 Ram address is 0x%08X\n",
			 irq_value,
			 (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
			 HGC_DQE_ECC_MB_ADDR_OFF);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
		dev_warn(dev, "hgc_iost_accbad_intr (0x%x) found: \
			 Ram address is 0x%08X\n",
			 irq_value,
			 (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
			 HGC_IOST_ECC_MB_ADDR_OFF);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
		dev_warn(dev, "hgc_itct_accbad_intr (0x%x) found: \
			 Ram address is 0x%08X\n",
			 irq_value,
			 (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
			 HGC_ITCT_ECC_MB_ADDR_OFF);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
		dev_warn(dev, "hgc_iostl_accbad_intr (0x%x) found: \
			 memory address is 0x%08X\n",
			 irq_value,
			 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
			 HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
		dev_warn(dev, "hgc_itctl_accbad_intr (0x%x) found: \
			 memory address is 0x%08X\n",
			 irq_value,
			 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
			 HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
		dev_warn(dev, "hgc_cqe_accbad_intr (0x%x) found: \
			 Ram address is 0x%08X\n",
			 irq_value,
			 (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
			 HGC_CQE_ECC_MB_ADDR_OFF);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
		dev_warn(dev, "rxm_mem0_accbad_intr (0x%x) found: \
			 memory address is 0x%08X\n",
			 irq_value,
			 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
			 HGC_RXM_DFX_STATUS14_MEM0_OFF);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
		dev_warn(dev, "rxm_mem1_accbad_intr (0x%x) found: \
			 memory address is 0x%08X\n",
			 irq_value,
			 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
			 HGC_RXM_DFX_STATUS14_MEM1_OFF);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
		dev_warn(dev, "rxm_mem2_accbad_intr (0x%x) found: \
			 memory address is 0x%08X\n",
			 irq_value,
			 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
			 HGC_RXM_DFX_STATUS14_MEM2_OFF);
		queue_work(hisi_hba->wq, &hisi_hba->rst_work);
	}

	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
		dev_warn(dev, "rxm_mem3_accbad_intr (0x%x) found: \
			 memory address is 0x%08X\n",
			 irq_value,
			 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
			 HGC_RXM_DFX_STATUS15_MEM3_OFF);
		queue_work(hisi_hba->wq,
&hisi_hba->rst_work); 3017 } 3018 3019 return; 3020 } 3021 3022 static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p) 3023 { 3024 struct hisi_hba *hisi_hba = p; 3025 u32 irq_value, irq_msk; 3026 3027 irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); 3028 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff); 3029 3030 irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); 3031 if (irq_value) { 3032 one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); 3033 multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value); 3034 } 3035 3036 hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value); 3037 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk); 3038 3039 return IRQ_HANDLED; 3040 } 3041 3042 #define AXI_ERR_NR 8 3043 static const char axi_err_info[AXI_ERR_NR][32] = { 3044 "IOST_AXI_W_ERR", 3045 "IOST_AXI_R_ERR", 3046 "ITCT_AXI_W_ERR", 3047 "ITCT_AXI_R_ERR", 3048 "SATA_AXI_W_ERR", 3049 "SATA_AXI_R_ERR", 3050 "DQE_AXI_R_ERR", 3051 "CQE_AXI_W_ERR" 3052 }; 3053 3054 #define FIFO_ERR_NR 5 3055 static const char fifo_err_info[FIFO_ERR_NR][32] = { 3056 "CQE_WINFO_FIFO", 3057 "CQE_MSG_FIFIO", 3058 "GETDQE_FIFO", 3059 "CMDP_FIFO", 3060 "AWTCTRL_FIFO" 3061 }; 3062 3063 static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) 3064 { 3065 struct hisi_hba *hisi_hba = p; 3066 u32 irq_value, irq_msk, err_value; 3067 struct device *dev = &hisi_hba->pdev->dev; 3068 3069 irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); 3070 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe); 3071 3072 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 3073 if (irq_value) { 3074 if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) { 3075 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 3076 1 << ENT_INT_SRC3_WP_DEPTH_OFF); 3077 dev_warn(dev, "write pointer and depth error (0x%x) \ 3078 found!\n", 3079 irq_value); 3080 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3081 } 3082 3083 if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) { 3084 hisi_sas_write32(hisi_hba, 
ENT_INT_SRC3, 3085 1 << 3086 ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF); 3087 dev_warn(dev, "iptt no match slot error (0x%x) found!\n", 3088 irq_value); 3089 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3090 } 3091 3092 if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) { 3093 dev_warn(dev, "read pointer and depth error (0x%x) \ 3094 found!\n", 3095 irq_value); 3096 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3097 } 3098 3099 if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) { 3100 int i; 3101 3102 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 3103 1 << ENT_INT_SRC3_AXI_OFF); 3104 err_value = hisi_sas_read32(hisi_hba, 3105 HGC_AXI_FIFO_ERR_INFO); 3106 3107 for (i = 0; i < AXI_ERR_NR; i++) { 3108 if (err_value & BIT(i)) { 3109 dev_warn(dev, "%s (0x%x) found!\n", 3110 axi_err_info[i], irq_value); 3111 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3112 } 3113 } 3114 } 3115 3116 if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) { 3117 int i; 3118 3119 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 3120 1 << ENT_INT_SRC3_FIFO_OFF); 3121 err_value = hisi_sas_read32(hisi_hba, 3122 HGC_AXI_FIFO_ERR_INFO); 3123 3124 for (i = 0; i < FIFO_ERR_NR; i++) { 3125 if (err_value & BIT(AXI_ERR_NR + i)) { 3126 dev_warn(dev, "%s (0x%x) found!\n", 3127 fifo_err_info[i], irq_value); 3128 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3129 } 3130 } 3131 3132 } 3133 3134 if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) { 3135 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 3136 1 << ENT_INT_SRC3_LM_OFF); 3137 dev_warn(dev, "LM add/fetch list error (0x%x) found!\n", 3138 irq_value); 3139 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3140 } 3141 3142 if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) { 3143 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 3144 1 << ENT_INT_SRC3_ABT_OFF); 3145 dev_warn(dev, "SAS_HGC_ABT fetch LM list error (0x%x) found!\n", 3146 irq_value); 3147 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3148 } 3149 } 3150 3151 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk); 3152 3153 return IRQ_HANDLED; 3154 } 3155 3156 
/*
 * Tasklet that drains one completion queue (CQ): walks the CQ entries from
 * the cached read pointer up to the hardware write pointer, completes the
 * corresponding slots, then writes the new read pointer back to hardware.
 * Runs under hisi_hba->lock for the whole drain.
 */
static void cq_tasklet_v2_hw(unsigned long val)
{
	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_itct *itct;
	struct hisi_sas_complete_v2_hdr *complete_queue;
	u32 rd_point = cq->rd_point, wr_point, dev_id;
	int queue = cq->id;

	if (unlikely(hisi_hba->reject_stp_links_msk))
		phys_try_accept_stp_links_v2_hw(hisi_hba);

	complete_queue = hisi_hba->complete_hdr[queue];

	spin_lock(&hisi_hba->lock);
	/* Each queue's WR pointer register is 0x14 bytes after the previous */
	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v2_hdr *complete_hdr;
		int iptt;

		complete_hdr = &complete_queue[rd_point];

		/* Check for NCQ completion */
		if (complete_hdr->act) {
			u32 act_tmp = complete_hdr->act;
			int ncq_tag_count = ffs(act_tmp);

			dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
				 CMPLT_HDR_DEV_ID_OFF;
			itct = &hisi_hba->itct[dev_id];

			/* The NCQ tags are held in the itct header */
			while (ncq_tag_count) {
				__le64 *ncq_tag = &itct->qw4_15[0];

				/* ffs() is 1-based; convert to a bit index */
				ncq_tag_count -= 1;
				/*
				 * Each 64-bit ITCT word packs five 12-bit
				 * IPTT values; pick the word and field for
				 * this NCQ tag.
				 */
				iptt = (ncq_tag[ncq_tag_count / 5]
					>> (ncq_tag_count % 5) * 12) & 0xfff;

				slot = &hisi_hba->slot_info[iptt];
				slot->cmplt_queue_slot = rd_point;
				slot->cmplt_queue = queue;
				slot_complete_v2_hw(hisi_hba, slot);

				/* Clear the bit we just serviced, find next */
				act_tmp &= ~(1 << ncq_tag_count);
				ncq_tag_count = ffs(act_tmp);
			}
		} else {
			/* Non-NCQ: the IPTT is carried in the CQ entry */
			iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v2_hw(hisi_hba, slot);
		}

		/* Circular queue: wrap the read pointer */
		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue),
			 rd_point);
	spin_unlock(&hisi_hba->lock);
}

/*
 * CQ hard-IRQ handler: ack the per-queue interrupt source bit and defer the
 * actual completion-queue processing to the tasklet.
 */
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	int queue = cq->id;

	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);

	tasklet_schedule(&cq->tasklet);

	return IRQ_HANDLED;
}

/*
 * Per-phy SATA interrupt handler: fires when a D2H FIS is received on a phy.
 * Validates the FIS, reads the port id and link rate, fakes up an attached
 * SAS address for the SATA device, and queues the phy-up work.
 */
static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
{
	struct hisi_sas_phy *phy = p;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = &hisi_hba->pdev->dev;
	struct hisi_sas_initial_fis *initial_fis;
	struct dev_to_host_fis *fis;
	u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
	irqreturn_t res = IRQ_HANDLED;
	u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
	int phy_no, offset;

	phy_no = sas_phy->id;
	initial_fis = &hisi_hba->initial_fis[phy_no];
	fis = &initial_fis->fis;

	/* ENT_INT_SRC_MSK1/2/3 each cover a group of four phys */
	offset = 4 * (phy_no / 4);
	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset,
			 ent_msk | 1 << ((phy_no % 4) * 8));

	ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset);
	/* Remember this phy's D2H-FIS source bit so we can ack it at "end" */
	ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF *
			     (phy_no % 4)));
	ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4);
	if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) {
		dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}

	/* check ERR bit of Status Register */
	if (fis->status & ATA_ERR) {
		dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
			 fis->status);
		/* Bounce the phy to force re-negotiation after a bad FIS */
		disable_phy_v2_hw(hisi_hba, phy_no);
		enable_phy_v2_hw(hisi_hba, phy_no);
		res = IRQ_NONE;
		goto end;
	}

	/* Phy 8's port number / rate live in PORT_STATE, not the shared regs */
	if (unlikely(phy_no == 8)) {
		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
3282 port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >> 3283 PORT_STATE_PHY8_PORT_NUM_OFF; 3284 link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >> 3285 PORT_STATE_PHY8_CONN_RATE_OFF; 3286 } else { 3287 port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); 3288 port_id = (port_id >> (4 * phy_no)) & 0xf; 3289 link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); 3290 link_rate = (link_rate >> (phy_no * 4)) & 0xf; 3291 } 3292 3293 if (port_id == 0xf) { 3294 dev_err(dev, "sata int: phy%d invalid portid\n", phy_no); 3295 res = IRQ_NONE; 3296 goto end; 3297 } 3298 3299 sas_phy->linkrate = link_rate; 3300 hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, 3301 HARD_PHY_LINKRATE); 3302 phy->maximum_linkrate = hard_phy_linkrate & 0xf; 3303 phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf; 3304 3305 sas_phy->oob_mode = SATA_OOB_MODE; 3306 /* Make up some unique SAS address */ 3307 attached_sas_addr[0] = 0x50; 3308 attached_sas_addr[7] = phy_no; 3309 memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE); 3310 memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis)); 3311 dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate); 3312 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 3313 phy->port_id = port_id; 3314 phy->phy_type |= PORT_TYPE_SATA; 3315 phy->phy_attached = 1; 3316 phy->identify.device_type = SAS_SATA_DEV; 3317 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); 3318 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; 3319 queue_work(hisi_hba->wq, &phy->phyup_ws); 3320 3321 end: 3322 hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); 3323 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk); 3324 3325 return res; 3326 } 3327 3328 static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = { 3329 int_phy_updown_v2_hw, 3330 int_chnl_int_v2_hw, 3331 }; 3332 3333 static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = { 3334 fatal_ecc_int_v2_hw, 3335 
fatal_axi_int_v2_hw 3336 }; 3337 3338 /** 3339 * There is a limitation in the hip06 chipset that we need 3340 * to map in all mbigen interrupts, even if they are not used. 3341 */ 3342 static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) 3343 { 3344 struct platform_device *pdev = hisi_hba->pdev; 3345 struct device *dev = &pdev->dev; 3346 int i, irq, rc, irq_map[128]; 3347 3348 3349 for (i = 0; i < 128; i++) 3350 irq_map[i] = platform_get_irq(pdev, i); 3351 3352 for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) { 3353 int idx = i; 3354 3355 irq = irq_map[idx + 1]; /* Phy up/down is irq1 */ 3356 if (!irq) { 3357 dev_err(dev, "irq init: fail map phy interrupt %d\n", 3358 idx); 3359 return -ENOENT; 3360 } 3361 3362 rc = devm_request_irq(dev, irq, phy_interrupts[i], 0, 3363 DRV_NAME " phy", hisi_hba); 3364 if (rc) { 3365 dev_err(dev, "irq init: could not request " 3366 "phy interrupt %d, rc=%d\n", 3367 irq, rc); 3368 return -ENOENT; 3369 } 3370 } 3371 3372 for (i = 0; i < hisi_hba->n_phy; i++) { 3373 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 3374 int idx = i + 72; /* First SATA interrupt is irq72 */ 3375 3376 irq = irq_map[idx]; 3377 if (!irq) { 3378 dev_err(dev, "irq init: fail map phy interrupt %d\n", 3379 idx); 3380 return -ENOENT; 3381 } 3382 3383 rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0, 3384 DRV_NAME " sata", phy); 3385 if (rc) { 3386 dev_err(dev, "irq init: could not request " 3387 "sata interrupt %d, rc=%d\n", 3388 irq, rc); 3389 return -ENOENT; 3390 } 3391 } 3392 3393 for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) { 3394 int idx = i; 3395 3396 irq = irq_map[idx + 81]; 3397 if (!irq) { 3398 dev_err(dev, "irq init: fail map fatal interrupt %d\n", 3399 idx); 3400 return -ENOENT; 3401 } 3402 3403 rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0, 3404 DRV_NAME " fatal", hisi_hba); 3405 if (rc) { 3406 dev_err(dev, 3407 "irq init: could not request fatal interrupt %d, rc=%d\n", 3408 irq, rc); 3409 return -ENOENT; 3410 } 3411 } 3412 3413 for (i = 0; i < 
hisi_hba->queue_count; i++) { 3414 int idx = i + 96; /* First cq interrupt is irq96 */ 3415 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 3416 struct tasklet_struct *t = &cq->tasklet; 3417 3418 irq = irq_map[idx]; 3419 if (!irq) { 3420 dev_err(dev, 3421 "irq init: could not map cq interrupt %d\n", 3422 idx); 3423 return -ENOENT; 3424 } 3425 rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0, 3426 DRV_NAME " cq", &hisi_hba->cq[i]); 3427 if (rc) { 3428 dev_err(dev, 3429 "irq init: could not request cq interrupt %d, rc=%d\n", 3430 irq, rc); 3431 return -ENOENT; 3432 } 3433 tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq); 3434 } 3435 3436 return 0; 3437 } 3438 3439 static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) 3440 { 3441 int rc; 3442 3443 memset(hisi_hba->sata_dev_bitmap, 0, sizeof(hisi_hba->sata_dev_bitmap)); 3444 3445 rc = hw_init_v2_hw(hisi_hba); 3446 if (rc) 3447 return rc; 3448 3449 rc = interrupt_init_v2_hw(hisi_hba); 3450 if (rc) 3451 return rc; 3452 3453 return 0; 3454 } 3455 3456 static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba) 3457 { 3458 struct platform_device *pdev = hisi_hba->pdev; 3459 int i; 3460 3461 for (i = 0; i < hisi_hba->queue_count; i++) 3462 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); 3463 3464 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); 3465 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); 3466 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); 3467 hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); 3468 3469 for (i = 0; i < hisi_hba->n_phy; i++) { 3470 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); 3471 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff); 3472 } 3473 3474 for (i = 0; i < 128; i++) 3475 synchronize_irq(platform_get_irq(pdev, i)); 3476 } 3477 3478 static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) 3479 { 3480 struct device *dev = &hisi_hba->pdev->dev; 3481 u32 old_state, state; 3482 int rc, cnt; 3483 int phy_no; 3484 3485 
	/* Snapshot the phy state so we can rescan topology after the reset */
	old_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	interrupt_disable_v2_hw(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);

	stop_phys_v2_hw(hisi_hba);

	mdelay(10);

	/* Request AXI master quiesce before resetting the controller */
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);

	/* wait until bus idle */
	cnt = 0;
	while (1) {
		u32 status = hisi_sas_read32_relaxed(hisi_hba,
				AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN);

		if (status == 0x3)
			break;

		udelay(10);
		if (cnt++ > 10) {
			dev_info(dev, "wait axi bus state to idle timeout!\n");
			/*
			 * NOTE(review): returns bare -1 rather than a
			 * -Exxx errno like the rest of this file — confirm
			 * whether callers only test for non-zero.
			 */
			return -1;
		}
	}

	hisi_sas_init_mem(hisi_hba);

	rc = hw_init_v2_hw(hisi_hba);
	if (rc)
		return rc;

	phys_reject_stp_links_v2_hw(hisi_hba);

	/* Re-enable the PHYs */
	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		/* Only restart phys that were administratively enabled */
		if (sas_phy->enabled)
			start_phy_v2_hw(hisi_hba, phy_no);
	}

	/* Wait for the PHYs to come up and read the PHY state */
	msleep(1000);

	state = hisi_sas_read32(hisi_hba, PHY_STATE);

	hisi_sas_rescan_topology(hisi_hba, old_state, state);

	return 0;
}

/* v2 hardware ops table plugged into the common hisi_sas layer */
static const struct hisi_sas_hw hisi_sas_v2_hw = {
	.hw_init = hisi_sas_v2_init,
	.setup_itct = setup_itct_v2_hw,
	.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
	.alloc_dev = alloc_dev_quirk_v2_hw,
	.sl_notify = sl_notify_v2_hw,
	.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
	.free_device = free_device_v2_hw,
	.prep_smp = prep_smp_v2_hw,
	.prep_ssp = prep_ssp_v2_hw,
	.prep_stp = prep_ata_v2_hw,
	.prep_abort = prep_abort_v2_hw,
	.get_free_slot = get_free_slot_v2_hw,
	.start_delivery = start_delivery_v2_hw,
	.slot_complete = slot_complete_v2_hw,
	.phys_init = phys_init_v2_hw,
	.phy_enable = enable_phy_v2_hw,
	.phy_disable = disable_phy_v2_hw,
	.phy_hard_reset = phy_hard_reset_v2_hw,
	.phy_set_linkrate = phy_set_linkrate_v2_hw,
	.phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
	.soft_reset = soft_reset_v2_hw,
};

static int hisi_sas_v2_probe(struct platform_device *pdev)
{
	/*
	 * Check if we should defer the probe before we probe the
	 * upper layer, as it's hard to defer later on.
	 */
	int ret = platform_get_irq(pdev, 0);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "cannot obtain irq\n");
		return ret;
	}

	return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
}

static int hisi_sas_v2_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	/* Stop the hba timer before the common teardown */
	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	return hisi_sas_remove(pdev);
}

static const struct of_device_id sas_v2_of_match[] = {
	{ .compatible = "hisilicon,hip06-sas-v2",},
	{ .compatible = "hisilicon,hip07-sas-v2",},
	{},
};
MODULE_DEVICE_TABLE(of, sas_v2_of_match);

static const struct acpi_device_id sas_v2_acpi_match[] = {
	{ "HISI0162", 0 },
	{ }
};

MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match);

static struct platform_driver hisi_sas_v2_driver = {
	.probe = hisi_sas_v2_probe,
	.remove = hisi_sas_v2_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sas_v2_of_match,
		.acpi_match_table = ACPI_PTR(sas_v2_acpi_match),
	},
};

module_platform_driver(hisi_sas_v2_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver");
MODULE_ALIAS("platform:" DRV_NAME);