/*
 * Copyright (c) 2016 Linaro Ltd.
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas_v2_hw"

/* global registers need init */
#define DLVRY_QUEUE_ENABLE 0x0
#define IOST_BASE_ADDR_LO 0x8
#define IOST_BASE_ADDR_HI 0xc
#define ITCT_BASE_ADDR_LO 0x10
#define ITCT_BASE_ADDR_HI 0x14
#define IO_BROKEN_MSG_ADDR_LO 0x18
#define IO_BROKEN_MSG_ADDR_HI 0x1c
#define PHY_CONTEXT 0x20
#define PHY_STATE 0x24
#define PHY_PORT_NUM_MA 0x28
#define PORT_STATE 0x2c
#define PORT_STATE_PHY8_PORT_NUM_OFF 16
#define PORT_STATE_PHY8_PORT_NUM_MSK (0xf << PORT_STATE_PHY8_PORT_NUM_OFF)
#define PORT_STATE_PHY8_CONN_RATE_OFF 20
#define PORT_STATE_PHY8_CONN_RATE_MSK (0xf << PORT_STATE_PHY8_CONN_RATE_OFF)
#define PHY_CONN_RATE 0x30
#define HGC_TRANS_TASK_CNT_LIMIT 0x38
#define AXI_AHB_CLK_CFG 0x3c
#define ITCT_CLR 0x44
#define ITCT_CLR_EN_OFF 16
#define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
#define ITCT_DEV_OFF 0
#define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
#define AXI_USER1 0x48
#define AXI_USER2 0x4c
#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
#define SATA_INITI_D2H_STORE_ADDR_LO 0x60
#define SATA_INITI_D2H_STORE_ADDR_HI 0x64
#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
#define HGC_GET_ITV_TIME 0x90
#define DEVICE_MSG_WORK_MODE 0x94
#define OPENA_WT_CONTI_TIME 0x9c
#define I_T_NEXUS_LOSS_TIME 0xa0
#define MAX_CON_TIME_LIMIT_TIME 0xa4
#define BUS_INACTIVE_LIMIT_TIME 0xa8
#define REJECT_TO_OPEN_LIMIT_TIME 0xac
#define CFG_AGING_TIME 0xbc
#define HGC_DFX_CFG2 0xc0
#define HGC_IOMB_PROC1_STATUS 0x104
#define CFG_1US_TIMER_TRSH 0xcc
#define HGC_INVLD_DQE_INFO 0x148
#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9
#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18
#define INT_COAL_EN 0x19c
#define OQ_INT_COAL_TIME 0x1a0
#define OQ_INT_COAL_CNT 0x1a4
#define ENT_INT_COAL_TIME 0x1a8
#define ENT_INT_COAL_CNT 0x1ac
#define OQ_INT_SRC 0x1b0
#define OQ_INT_SRC_MSK 0x1b4
#define ENT_INT_SRC1 0x1b8
#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2 0x1bc
#define ENT_INT_SRC3 0x1c0
#define ENT_INT_SRC3_ITC_INT_OFF 15
#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
#define ENT_INT_SRC_MSK1 0x1c4
#define ENT_INT_SRC_MSK2 0x1c8
#define ENT_INT_SRC_MSK3 0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
#define SAS_ECC_INTR_MSK 0x1ec
#define HGC_ERR_STAT_EN 0x238
#define DLVRY_Q_0_BASE_ADDR_LO 0x260
#define DLVRY_Q_0_BASE_ADDR_HI 0x264
#define DLVRY_Q_0_DEPTH 0x268
#define DLVRY_Q_0_WR_PTR 0x26c
#define DLVRY_Q_0_RD_PTR 0x270
#define HYPER_STREAM_ID_EN_CFG 0xc80
#define OQ0_INT_SRC_MSK 0xc90
#define COMPL_Q_0_BASE_ADDR_LO 0x4e0
#define COMPL_Q_0_BASE_ADDR_HI 0x4e4
#define COMPL_Q_0_DEPTH 0x4e8
#define COMPL_Q_0_WR_PTR 0x4ec
#define COMPL_Q_0_RD_PTR 0x4f0

/* phy registers need init */
#define PORT_BASE (0x2000)

#define PHY_CFG (PORT_BASE + 0x0)
#define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
#define PHY_CFG_ENA_OFF 0
#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF 2
#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
#define PROG_PHY_LINK_RATE_MAX_OFF 0
#define PROG_PHY_LINK_RATE_MAX_MSK (0xff << PROG_PHY_LINK_RATE_MAX_OFF)
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
#define SAS_PHY_CTRL (PORT_BASE + 0x20)
#define SL_CFG (PORT_BASE + 0x84)
#define PHY_PCN (PORT_BASE + 0x44)
#define SL_TOUT_CFG (PORT_BASE + 0x8c)
#define SL_CONTROL (PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF 0
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
#define TX_ID_DWORD3 (PORT_BASE + 0xa8)
#define TX_ID_DWORD4 (PORT_BASE + 0xaC)
#define TX_ID_DWORD5 (PORT_BASE + 0xb0)
#define TX_ID_DWORD6 (PORT_BASE + 0xb4)
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8)
#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc)
#define RX_IDAF_DWORD3 (PORT_BASE + 0xd0)
#define RX_IDAF_DWORD4 (PORT_BASE + 0xd4)
#define RX_IDAF_DWORD5 (PORT_BASE + 0xd8)
#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define DONE_RECEIVED_TIME (PORT_BASE + 0x11c)
#define CHL_INT0 (PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF 0
#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
#define CHL_INT0_SL_RX_BCST_ACK_OFF 1
#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
#define CHL_INT0_SL_PHY_ENABLE_OFF 2
#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
#define CHL_INT0_NOT_RDY_OFF 4
#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
#define CHL_INT0_PHY_RDY_OFF 5
#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
#define CHL_INT1 (PORT_BASE + 0x1b8)
#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
#define CHL_INT2 (PORT_BASE + 0x1bc)
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
#define DMA_TX_STATUS (PORT_BASE + 0x2d0)
#define DMA_TX_STATUS_BUSY_OFF 0
#define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF)
#define DMA_RX_STATUS (PORT_BASE + 0x2e8)
#define DMA_RX_STATUS_BUSY_OFF 0
#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)

#define AXI_CFG (0x5100)
#define AM_CFG_MAX_TRANS (0x5010)
#define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014)

/* HW dma structures */
/* Delivery queue header */
/* dw0 */
#define CMD_HDR_RESP_REPORT_OFF 5
#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF 6
#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
#define CMD_HDR_PORT_OFF 18
#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF 27
#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
#define CMD_HDR_CMD_OFF 29
#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
/* dw1 */
#define CMD_HDR_DIR_OFF 5
#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
#define CMD_HDR_RESET_OFF 7
#define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
#define CMD_HDR_VDTL_OFF 10
#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
#define CMD_HDR_FRAME_TYPE_OFF 11
#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
#define CMD_HDR_DEV_ID_OFF 16
#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
/* dw2 */
#define CMD_HDR_CFL_OFF 0
#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
#define CMD_HDR_NCQ_TAG_OFF 10
#define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
#define CMD_HDR_MRFL_OFF 15
#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
#define CMD_HDR_SG_MOD_OFF 24
#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
#define CMD_HDR_FIRST_BURST_OFF 26
#define CMD_HDR_FIRST_BURST_MSK (0x1 << CMD_HDR_FIRST_BURST_OFF)
/* dw3 */
#define CMD_HDR_IPTT_OFF 0
#define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
/* dw6 */
#define CMD_HDR_DIF_SGL_LEN_OFF 0
#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
#define CMD_HDR_DATA_SGL_LEN_OFF 16
#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)

/* Completion header */
/* dw0 */
#define CMPLT_HDR_RSPNS_XFRD_OFF 10
#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
#define CMPLT_HDR_ERX_OFF 12
#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
/* dw1 */
#define CMPLT_HDR_IPTT_OFF 0
#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
#define CMPLT_HDR_DEV_ID_OFF 16
#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)

/* ITCT header */
/* qw0 */
#define ITCT_HDR_DEV_TYPE_OFF 0
#define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF)
#define ITCT_HDR_VALID_OFF 2
#define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF)
#define ITCT_HDR_MCR_OFF 5
#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
#define ITCT_HDR_VLN_OFF 9
#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
#define ITCT_HDR_PORT_ID_OFF 28
#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
/* qw2 */
#define ITCT_HDR_INLT_OFF 0
#define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF)
#define ITCT_HDR_BITLT_OFF 16
#define ITCT_HDR_BITLT_MSK (0xffffULL << ITCT_HDR_BITLT_OFF)
#define ITCT_HDR_MCTLT_OFF 32
#define ITCT_HDR_MCTLT_MSK (0xffffULL << ITCT_HDR_MCTLT_OFF)
#define ITCT_HDR_RTOLT_OFF 48
#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)

struct hisi_sas_complete_v2_hdr {
	__le32 dw0;
	__le32 dw1;
	__le32 act;
	__le32 dw3;
};

struct hisi_sas_err_record_v2 {
	/* dw0 */
	__le32 trans_tx_fail_type;

	/* dw1 */
	__le32 trans_rx_fail_type;

	/* dw2 */
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;

	/* dw3 */
	__le32 dma_rx_err_type;
};

enum {
	HISI_SAS_PHY_PHY_UPDOWN,
	HISI_SAS_PHY_CHNL_INT,
	HISI_SAS_PHY_INT_NR
};

enum {
	TRANS_TX_FAIL_BASE = 0x0, /* dw0 */
	TRANS_RX_FAIL_BASE = 0x100, /* dw1 */
	DMA_TX_ERR_BASE = 0x200, /* dw2 bit 15-0 */
	SIPC_RX_ERR_BASE = 0x300, /* dw2 bit 31-16 */
	DMA_RX_ERR_BASE = 0x400, /* dw3 */

	/* trans tx */
	TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */
	TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */
	TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */
	TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */
	TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */
	RESERVED0, /* 0x5 */
	TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */
	TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */
	TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */
	TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */
	TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */
	TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */
	TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */
	TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */
	TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */
	TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */
	TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */
	TRANS_TX_ERR_FRAME_TXED, /* 0x11 */
	TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */
	TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */
	TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */
	TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */
	TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp */
	TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */
	TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */
	TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */
	TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp */
	TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp */
	/*IO_TX_ERR_WITH_R_ERR_RECEVIED, [> 0x1b for sata/stp<] */
	TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */
	/*IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */
	TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */
	TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */
	/*IO_TX_ERR_WITH_SYNC_RXD, [> 0x1e <] for sata/stp */
	TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */

	/* trans rx */
	TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x100 */
	TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x101 for sata/stp */
	TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x102 for ssp/smp */
	/*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x102 <] for sata/stp */
	TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x103 for sata/stp */
	TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x104 for sata/stp */
	TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x105 for smp */
	/*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x105 <] for sata/stp */
	TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x106 for sata/stp */
	TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x107 */
	TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x108 */
	TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x109 */
	TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x10a */
	RESERVED1, /* 0x10b */
	TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x10c */
	TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x10d */
	TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x10e */
	TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x10f */
	TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x110 for ssp/smp */
	TRANS_RX_ERR_WITH_BAD_HASH, /* 0x111 for ssp */
	/*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x111 <] for sata/stp */
	TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x112 for ssp */
	/*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x112 <] for sata/stp */
	TRANS_RX_SSP_FRM_LEN_ERR, /* 0x113 for ssp */
	/*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x113 <] for sata */
	RESERVED2, /* 0x114 */
	RESERVED3, /* 0x115 */
	RESERVED4, /* 0x116 */
	RESERVED5, /* 0x117 */
	TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x118 */
	TRANS_RX_SMP_FRM_LEN_ERR, /* 0x119 */
	TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x11a */
	RESERVED6, /* 0x11b */
	RESERVED7, /* 0x11c */
	RESERVED8, /* 0x11d */
	RESERVED9, /* 0x11e */
	TRANS_RX_R_ERR, /* 0x11f */

	/* dma tx */
	DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x200 */
	DMA_TX_DIF_APP_ERR, /* 0x201 */
	DMA_TX_DIF_RPP_ERR, /* 0x202 */
	DMA_TX_DATA_SGL_OVERFLOW, /* 0x203 */
	DMA_TX_DIF_SGL_OVERFLOW, /* 0x204 */
	DMA_TX_UNEXP_XFER_ERR, /* 0x205 */
	DMA_TX_UNEXP_RETRANS_ERR, /* 0x206 */
	DMA_TX_XFER_LEN_OVERFLOW, /* 0x207 */
	DMA_TX_XFER_OFFSET_ERR, /* 0x208 */
	DMA_TX_RAM_ECC_ERR, /* 0x209 */
	DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x20a */

	/* sipc rx */
	SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x300 */
	SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x301 */
	SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x302 */
	SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x303 */
	SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x304 */
	SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x305 */
	SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x306 */
	SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x307 */
	SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x308 */
	SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x309 */
	SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x30a */

	/* dma rx */
	DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x400 */
	DMA_RX_DIF_APP_ERR, /* 0x401 */
	DMA_RX_DIF_RPP_ERR, /* 0x402 */
	DMA_RX_DATA_SGL_OVERFLOW, /* 0x403 */
	DMA_RX_DIF_SGL_OVERFLOW, /* 0x404 */
	DMA_RX_DATA_LEN_OVERFLOW, /* 0x405 */
	DMA_RX_DATA_LEN_UNDERFLOW, /* 0x406 */
	DMA_RX_DATA_OFFSET_ERR, /* 0x407 */
	RESERVED10, /* 0x408 */
	DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x409 */
	DMA_RX_RESP_BUF_OVERFLOW, /* 0x40a */
	DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x40b */
	DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x40c */
	DMA_RX_UNEXP_RDFRAME_ERR, /* 0x40d */
	DMA_RX_PIO_DATA_LEN_ERR, /* 0x40e */
	DMA_RX_RDSETUP_STATUS_ERR, /* 0x40f */
	DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x410 */
	DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x411 */
	DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x412 */
	DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x413 */
	DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x414 */
	DMA_RX_RDSETUP_OFFSET_ERR, /* 0x415 */
	DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x416 */
	DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x417 */
	DMA_RX_RAM_ECC_ERR, /* 0x418 */
	DMA_RX_UNKNOWN_FRM_ERR, /* 0x419 */
};

#define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096

#define DIR_NO_DATA 0
#define DIR_TO_INI 1
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3

#define SATA_PROTOCOL_NONDATA 0x1
#define SATA_PROTOCOL_PIO 0x2
#define SATA_PROTOCOL_DMA 0x4
#define SATA_PROTOCOL_FPDMA 0x8
#define SATA_PROTOCOL_ATAPI 0x10

static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
	void __iomem *regs = hisi_hba->regs + off;

	return readl(regs);
}

static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
{
	void __iomem *regs = hisi_hba->regs + off;

	return readl_relaxed(regs);
}

static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + off;

	writel(val, regs);
}
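
/* Per-phy (port) registers are banked at a 0x400 byte stride from PORT_BASE. */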
static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
				 u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	writel(val, regs);
}

static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
			       int phy_no, u32 off)
{
	void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;

	return readl(regs);
}

static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg &= ~PHY_CFG_DC_OPT_MSK;
	cfg |= 1 << PHY_CFG_DC_OPT_OFF;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct sas_identify_frame identify_frame;
	u32 *identify_buffer;

	memset(&identify_frame, 0, sizeof(identify_frame));
	identify_frame.dev_type = SAS_END_DEVICE;
	identify_frame.frame_type = 0;
	identify_frame._un1 = 1;
	identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
	identify_frame.target_bits = SAS_PROTOCOL_NONE;
	memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
	identify_frame.phy_id = phy_no;
	identify_buffer = (u32 *)(&identify_frame);

	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
			     __swab32(identify_buffer[0]));
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
			     identify_buffer[2]);
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
			     identify_buffer[1]);
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
			     identify_buffer[4]);
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
			     identify_buffer[3]);
	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
			     __swab32(identify_buffer[5]));
}

static void init_id_frame_v2_hw(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		config_id_frame_v2_hw(hisi_hba, i);
}

static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
			     struct hisi_sas_device *sas_dev)
{
	struct domain_device *device = sas_dev->sas_device;
	struct device *dev = &hisi_hba->pdev->dev;
	u64 qw0, device_id = sas_dev->device_id;
	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_port *port = device->port->lldd_port;

	memset(itct, 0, sizeof(*itct));

	/* qw0 */
	qw0 = 0;
	switch (sas_dev->dev_type) {
	case SAS_END_DEVICE:
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
		break;
	case SAS_SATA_DEV:
		if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
		else
			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
		break;
	default:
		dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
			 sas_dev->dev_type);
	}

	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
		(device->max_linkrate << ITCT_HDR_MCR_OFF) |
		(1 << ITCT_HDR_VLN_OFF) |
		(port->id << ITCT_HDR_PORT_ID_OFF));
	itct->qw0 = cpu_to_le64(qw0);

	/* qw1 */
	memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
	itct->sas_addr = __swab64(itct->sas_addr);

	/* qw2 */
	itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
				(0xff00ULL << ITCT_HDR_BITLT_OFF) |
				(0xff00ULL << ITCT_HDR_MCTLT_OFF) |
				(0xff00ULL << ITCT_HDR_RTOLT_OFF));
}

static void free_device_v2_hw(struct hisi_hba *hisi_hba,
			      struct hisi_sas_device *sas_dev)
{
	u64 qw0, dev_id = sas_dev->device_id;
	struct device *dev = &hisi_hba->pdev->dev;
	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
	int i;

	/* clear the itct interrupt state */
	if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
				 ENT_INT_SRC3_ITC_INT_MSK);

	/* clear the itct int */
	for (i = 0; i < 2; i++) {
		/* clear the itct table */
		reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
		reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
		hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

		udelay(10);
		reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
		if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
			dev_dbg(dev, "got clear ITCT done interrupt\n");

			/* invalidate the itct state */
			qw0 = cpu_to_le64(itct->qw0);
			qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
					 ENT_INT_SRC3_ITC_INT_MSK);
			hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
			hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;

			/* clear the itct */
			hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
			dev_dbg(dev, "clear ITCT ok\n");
			break;
		}
	}
}

static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
{
	int i, reset_val;
	u32 val;
	unsigned long end_time;
	struct device *dev = &hisi_hba->pdev->dev;

	/* The mask needs to be set depending on the number of phys */
	if (hisi_hba->n_phy == 9)
		reset_val = 0x1fffff;
	else
		reset_val = 0x7ffff;

	/* Disable all of the DQ */
	for (i = 0; i < HISI_SAS_MAX_QUEUES; i++)
		hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);

	/* Disable all of the PHYs */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG);

		phy_cfg &= ~PHY_CTRL_RESET_MSK;
		hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg);
	}
	udelay(50);

	/* Ensure DMA tx & rx idle */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		u32 dma_tx_status, dma_rx_status;

		end_time = jiffies + msecs_to_jiffies(1000);

		while (1) {
			dma_tx_status = hisi_sas_phy_read32(hisi_hba, i,
							    DMA_TX_STATUS);
			dma_rx_status = hisi_sas_phy_read32(hisi_hba, i,
							    DMA_RX_STATUS);

			if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) &&
			    !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK))
				break;

			msleep(20);
			if (time_after(jiffies, end_time))
				return -EIO;
		}
	}

	/* Ensure axi bus idle */
	end_time = jiffies + msecs_to_jiffies(1000);
	while (1) {
		u32 axi_status =
			hisi_sas_read32(hisi_hba, AXI_CFG);

		if (axi_status == 0)
			break;

		msleep(20);
		if (time_after(jiffies, end_time))
			return -EIO;
	}

	/* reset and disable clock */
	regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
		     reset_val);
	regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
		     reset_val);
	msleep(1);
	regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
	if (reset_val != (val & reset_val)) {
		dev_err(dev, "SAS reset fail.\n");
		return -EIO;
	}

	/* De-reset and enable clock */
	regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
		     reset_val);
	regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
		     reset_val);
	msleep(1);
	regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg,
		    &val);
	if (val & reset_val) {
		dev_err(dev, "SAS de-reset fail.\n");
		return -EIO;
	}

	return 0;
}

static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = &hisi_hba->pdev->dev;
	struct device_node *np = dev->of_node;
	int i;

	/* Global registers init */

	/* Deal with am-max-transmissions quirk */
	if (of_get_property(np, "hip06-sas-v2-quirk-amt", NULL)) {
		hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020);
		hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS,
				 0x2020);
	} /* Else, use defaults -> do nothing */

	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
			 (u32)((1ULL << hisi_hba->queue_count) - 1));
	hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000);
	hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000);
	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
	hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF);
	hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1);
	hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4);
	hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x4E20);
	hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1);
	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
	hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1);
	hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1);
	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
	hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1);
	hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1);
	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0);
	for (i = 0; i < hisi_hba->queue_count; i++)
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);

	hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
		hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908);
		hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
		hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
		hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x23f801fc);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		/* Delivery queue */
		hisi_sas_write32(hisi_hba,
				 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->cmd_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);

		/* Completion queue */
		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
				 upper_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
				 lower_32_bits(hisi_hba->complete_hdr_dma[i]));

		hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
				 HISI_SAS_QUEUE_SLOTS);
	}

	/* itct */
	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->itct_dma));

	hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->itct_dma));

	/* iost */
	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
			 lower_32_bits(hisi_hba->iost_dma));

	hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
			 upper_32_bits(hisi_hba->iost_dma));

	/* breakpoint */
	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->breakpoint_dma));

	/* SATA broken msg */
	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
			 lower_32_bits(hisi_hba->sata_breakpoint_dma));

	hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
			 upper_32_bits(hisi_hba->sata_breakpoint_dma));

	/* SATA initial fis */
	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
			 lower_32_bits(hisi_hba->initial_fis_dma));

	hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
			 upper_32_bits(hisi_hba->initial_fis_dma));
}

static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = &hisi_hba->pdev->dev;
	int rc;

	rc = reset_hw_v2_hw(hisi_hba);
	if (rc) {
		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
		return rc;
	}

	msleep(100);
	init_reg_v2_hw(hisi_hba);

	init_id_frame_v2_hw(hisi_hba);

	return 0;
}

static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg |= PHY_CFG_ENA_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	cfg &= ~PHY_CFG_ENA_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}

static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	config_id_frame_v2_hw(hisi_hba, phy_no);
	config_phy_opt_mode_v2_hw(hisi_hba, phy_no);
	enable_phy_v2_hw(hisi_hba, phy_no);
}

static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	disable_phy_v2_hw(hisi_hba, phy_no);
}

static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	stop_phy_v2_hw(hisi_hba, phy_no);
	msleep(100);
	start_phy_v2_hw(hisi_hba, phy_no);
}
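
/* Timer callback armed in phys_init_v2_hw(): start all phys once the delay expires. */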
static void start_phys_v2_hw(unsigned long data)
{
	struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		start_phy_v2_hw(hisi_hba, i);
}

static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
{
	int i;
	struct timer_list *timer = &hisi_hba->timer;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a);
		hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK);
	}

	setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
	mod_timer(timer, jiffies + HZ);
}

static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	u32 sl_control;

	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
	msleep(1);
	sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}

static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
{
	int i, bitmap = 0;
	u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	for (i = 0; i < (hisi_hba->n_phy < 9 ? hisi_hba->n_phy : 8); i++)
		if (phy_state & 1 << i)
			if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
				bitmap |= 1 << i;

	if (hisi_hba->n_phy == 9) {
		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);

		if (phy_state & 1 << 8)
			if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
			     PORT_STATE_PHY8_PORT_NUM_OFF) == port_id)
				bitmap |= 1 << 9;
	}

	return bitmap;
}

/**
 * This function allocates across all queues to load balance.
 * Slots are allocated from the queues in a round-robin fashion.
 *
 * The call path to this function, up to and including the write to the
 * delivery queue write pointer, must be safe from interruption.
 */
static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
{
	struct device *dev = &hisi_hba->pdev->dev;
	u32 r, w;
	int queue = hisi_hba->queue;

	while (1) {
		w = hisi_sas_read32_relaxed(hisi_hba,
				DLVRY_Q_0_WR_PTR + (queue * 0x14));
		r = hisi_sas_read32_relaxed(hisi_hba,
				DLVRY_Q_0_RD_PTR + (queue * 0x14));
		if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
			queue = (queue + 1) % hisi_hba->queue_count;
			if (queue == hisi_hba->queue) {
				dev_warn(dev, "could not find free slot\n");
				return -EAGAIN;
			}
			continue;
		}
		break;
	}
	hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
	*q = queue;
	*s = w;
	return 0;
}

static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
{
	int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
	int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;

	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
			 ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS);
}

static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
			      struct hisi_sas_slot *slot,
			      struct hisi_sas_cmd_hdr *hdr,
			      struct scatterlist *scatter,
			      int n_elem)
{
	struct device *dev = &hisi_hba->pdev->dev;
	struct scatterlist *sg;
	int i;

	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			n_elem);
		return -EINVAL;
	}

	slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
					&slot->sge_page_dma);
	if (!slot->sge_page)
		return -ENOMEM;

	for_each_sg(scatter, sg, n_elem, i) {
		struct hisi_sas_sge *entry = &slot->sge_page->sge[i];

		entry->addr = cpu_to_le64(sg_dma_address(sg));
		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
		entry->data_len = cpu_to_le32(sg_dma_len(sg));
		entry->data_off = 0;
	}

	hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);

	hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);

	return 0;
}

static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct device *dev = &hisi_hba->pdev->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req, *sg_resp;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len, resp_len;
	int elem, rc;

	/*
	 * DMA-map SMP request, response buffers
	 */
	/* req */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* resp */
	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out_req;
	}
	resp_len = sg_dma_len(sg_resp);
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_resp;
	}

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));
	/* dw2 */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	return 0;

err_out_resp:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out_req:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}

static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot, int is_tmf,
			  struct hisi_sas_tmf_task *tmf)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port = slot->port;
	struct sas_ssp_task *ssp_task = &task->ssp_task;
	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
	int has_data = 0, rc, priority = is_tmf;
	u8 *buf_cmd;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
			       (2 << CMD_HDR_TLR_CTRL_OFF) |
			       (port->id << CMD_HDR_PORT_OFF) |
			       (priority << CMD_HDR_PRIORITY_OFF) |
			       (1 << CMD_HDR_CMD_OFF)); /* ssp */

	dw1 = 1 << CMD_HDR_VDTL_OFF;
	if (is_tmf) {
		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
	} else {
		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_TO_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
			break;
		case DMA_FROM_DEVICE:
			has_data = 1;
			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
			break;
		default:
			dw1 &= ~CMD_HDR_DIR_MSK;
		}
	}

	/* map itct entry */
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
	hdr->dw1 = cpu_to_le32(dw1);

	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
	      + 3) / 4) << CMD_HDR_CFL_OFF) |
	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
	      (2 << CMD_HDR_SG_MOD_OFF);
	hdr->dw2 = cpu_to_le32(dw2);

	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);

	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	if (!is_tmf) {
		buf_cmd[9] = task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
		       task->ssp_task.cmd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	return 0;
}
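
/*
 * Copy the D2H FIS that the hardware stored after the error record in the
 * slot's status buffer into the libsas ATA task response.
 */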
static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
			    struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct dev_to_host_fis *d2h = slot->status_buffer +
				      sizeof(struct hisi_sas_err_record);

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}

/* by default, task resp is complete */
static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
			   struct sas_task *task,
			   struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct hisi_sas_err_record_v2 *err_record = slot->status_buffer;
	u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
	u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
	u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
	u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type);
	u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type);
	int error = -1;

	if (dma_rx_err_type) {
		error = ffs(dma_rx_err_type)
			- 1 + DMA_RX_ERR_BASE;
	} else if (sipc_rx_err_type) {
		error = ffs(sipc_rx_err_type)
			- 1 + SIPC_RX_ERR_BASE;
	} else if (dma_tx_err_type) {
		error = ffs(dma_tx_err_type)
			- 1 + DMA_TX_ERR_BASE;
	} else if (trans_rx_fail_type) {
		error = ffs(trans_rx_fail_type)
			- 1 + TRANS_RX_FAIL_BASE;
	} else if (trans_tx_fail_type) {
		error = ffs(trans_tx_fail_type)
			- 1 + TRANS_TX_FAIL_BASE;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	{
		switch (error) {
		case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
		{
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_NO_DEST;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
		{
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_PATH_BLOCKED;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
		{
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_EPROTO;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
		{
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_CONN_RATE;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
		{
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_BAD_DEST;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
		{
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
		{
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
		{
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_UNKNOWN;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
		{
			/* not sure */
			ts->stat = SAS_DEV_NO_RESPONSE;
			break;
		}
		case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
		{
			ts->stat = SAS_PHY_DOWN;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
		{
			ts->stat = SAS_OPEN_TO;
			break;
		}
		case DMA_RX_DATA_LEN_OVERFLOW:
		{
			ts->stat = SAS_DATA_OVERRUN;
			ts->residual = 0;
			break;
		}
		case DMA_RX_DATA_LEN_UNDERFLOW:
		case SIPC_RX_DATA_UNDERFLOW_ERR:
		{
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
			break;
		}
		case TRANS_TX_ERR_FRAME_TXED:
		{
			/* This will request a retry */
			ts->stat = SAS_QUEUE_FULL;
			slot->abort = 1;
			break;
		}
		case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
		case TRANS_TX_ERR_PHY_NOT_ENABLE:
		case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
		case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
		case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
		case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
		case TRANS_TX_ERR_WITH_BREAK_REQUEST:
		case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
		case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
		case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
		case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
		case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
		case TRANS_TX_ERR_WITH_NAK_RECEVIED:
		case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
		case TRANS_TX_ERR_WITH_IPTT_CONFLICT:
		case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
		case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR:
		case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
		case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
		case TRANS_RX_ERR_WITH_BREAK_TIMEOUT:
		case TRANS_RX_ERR_WITH_BREAK_REQUEST:
		case TRANS_RX_ERR_WITH_BREAK_RECEVIED:
		case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
		case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
		case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
		case TRANS_RX_ERR_WITH_DATA_LEN0:
		case TRANS_RX_ERR_WITH_BAD_HASH:
		case TRANS_RX_XRDY_WLEN_ZERO_ERR:
		case TRANS_RX_SSP_FRM_LEN_ERR:
		case TRANS_RX_ERR_WITH_BAD_FRM_TYPE:
		case DMA_TX_UNEXP_XFER_ERR:
		case DMA_TX_UNEXP_RETRANS_ERR:
		case DMA_TX_XFER_LEN_OVERFLOW:
		case DMA_TX_XFER_OFFSET_ERR:
		case DMA_RX_DATA_OFFSET_ERR:
		case DMA_RX_UNEXP_NORM_RESP_ERR:
		case DMA_RX_UNEXP_RDFRAME_ERR:
		case DMA_RX_UNKNOWN_FRM_ERR:
		{
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_UNKNOWN;
			break;
		}
		default:
			break;
		}
	}
		break;
	case SAS_PROTOCOL_SMP:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		switch (error) {
		case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
		case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
		case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
		{
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_DEV_NO_RESPONSE;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
		case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
		case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
		case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
		case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
		case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
		case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY:
		{
			ts->stat = SAS_OPEN_REJECT;
			break;
		}
		case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
		{
			ts->stat = SAS_OPEN_TO;
			break;
		}
		case DMA_RX_DATA_LEN_OVERFLOW:
		{
			ts->stat = SAS_DATA_OVERRUN;
			break;
		}
		case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
		case TRANS_TX_ERR_PHY_NOT_ENABLE:
		case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
		case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
		case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
		case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
		case TRANS_TX_ERR_WITH_BREAK_REQUEST:
		case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
		case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
		case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
		case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
		case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
		case TRANS_TX_ERR_WITH_NAK_RECEVIED:
		case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
		case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
		case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT:
		case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
		case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
		case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR:
		case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR:
		case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN:
		case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP:
		case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
		case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
		case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
		case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
		case TRANS_RX_ERR_WITH_DATA_LEN0:
		case TRANS_RX_ERR_WITH_BAD_HASH:
		case TRANS_RX_XRDY_WLEN_ZERO_ERR:
		case TRANS_RX_SSP_FRM_LEN_ERR:
		case SIPC_RX_FIS_STATUS_ERR_BIT_VLD:
		case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR:
		case SIPC_RX_FIS_STATUS_BSY_BIT_ERR:
		case SIPC_RX_WRSETUP_LEN_ODD_ERR:
		case SIPC_RX_WRSETUP_LEN_ZERO_ERR:
		case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR:
		case SIPC_RX_SATA_UNEXP_FIS_ERR:
		case DMA_RX_SATA_FRAME_TYPE_ERR:
		case DMA_RX_UNEXP_RDFRAME_ERR:
		case DMA_RX_PIO_DATA_LEN_ERR:
		case DMA_RX_RDSETUP_STATUS_ERR:
		case DMA_RX_RDSETUP_STATUS_DRQ_ERR:
		case DMA_RX_RDSETUP_STATUS_BSY_ERR:
		case DMA_RX_RDSETUP_LEN_ODD_ERR:
		case DMA_RX_RDSETUP_LEN_ZERO_ERR:
		case DMA_RX_RDSETUP_LEN_OVER_ERR:
		case DMA_RX_RDSETUP_OFFSET_ERR:
		case DMA_RX_RDSETUP_ACTIVE_ERR:
		case DMA_RX_RDSETUP_ESTATUS_ERR:
		case DMA_RX_UNKNOWN_FRM_ERR:
		{
			ts->stat = SAS_OPEN_REJECT;
			break;
		}
		default:
		{
			ts->stat = SAS_PROTO_RESPONSE;
			break;
		}
		}
		sata_done_v2_hw(hisi_hba, task, slot);
	}
		break;
	default:
		break;
	}
}

static int
slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
		    int abort)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_device *sas_dev;
	struct device *dev = &hisi_hba->pdev->dev;
	struct task_status_struct *ts;
	struct domain_device *device;
	enum exec_status sts;
	struct hisi_sas_complete_v2_hdr *complete_queue =
			hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v2_hdr *complete_hdr =
			&complete_queue[slot->cmplt_queue_slot];

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -EINVAL;

	ts = &task->task_status;
	device = task->dev;
	sas_dev = device->lldd_dev;

	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	task->task_state_flags |= SAS_TASK_STATE_DONE;

	memset(ts, 0, sizeof(*ts));
	ts->resp = SAS_TASK_COMPLETE;

	if (unlikely(!sas_dev || abort)) {
		if (!sas_dev)
			dev_dbg(dev, "slot complete: port has no device\n");
		ts->stat = SAS_PHY_DOWN;
		goto out;
	}

	if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
	    (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {

		slot_err_v2_hw(hisi_hba, task, slot);
		if (unlikely(slot->abort)) {
			queue_work(hisi_hba->wq, &slot->abort_slot);
			/* immediately return and do not complete */
			return ts->stat;
		}
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	{
		struct ssp_response_iu *iu = slot->status_buffer +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP:
	{
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to;

		ts->stat = SAM_STAT_GOOD;
		to = kmap_atomic(sg_page(sg_resp));

		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		memcpy(to + sg_resp->offset,
		       slot->status_buffer +
		       sizeof(struct hisi_sas_err_record),
		       sg_dma_len(sg_resp));
		kunmap_atomic(to);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		ts->stat = SAM_STAT_GOOD;
		sata_done_v2_hw(hisi_hba, task, slot);
		break;
	}
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_err(dev, "slot complete: port %d has been removed\n",
			slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	if (sas_dev && sas_dev->running_req)
		sas_dev->running_req--;

	hisi_sas_slot_task_free(hisi_hba, task, slot);
	sts = ts->stat;

	if (task->task_done)
		task->task_done(task);

	return sts;
}

static u8 get_ata_protocol(u8 cmd, int direction)
{
	switch (cmd) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
		return SATA_PROTOCOL_FPDMA;

	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return SATA_PROTOCOL_PIO;

	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
		return SATA_PROTOCOL_DMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_CHK_POWER:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
		return SATA_PROTOCOL_NONDATA;
	default:
		if (direction == DMA_NONE)
			return SATA_PROTOCOL_NONDATA;
		return SATA_PROTOCOL_PIO;
	}
}

static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
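
/* Build the delivery queue command header and command table for a SATA/STP task. */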
static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device = task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct hisi_sas_port *port = device->port->lldd_port;
	u8 *buf_cmd;
	int has_data = 0, rc = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	else
		hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);

	/* dw1 */
	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	if (0 == task->ata_task.fis.command)
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir))
		<< CMD_HDR_FRAME_TYPE_OFF;
	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2 */
	if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) {
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
			2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);

	buf_cmd = slot->command_table;

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));

	return 0;
}

static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i, res = 0;
	u32 context, port_id, link_rate, hard_phy_linkrate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = &hisi_hba->pdev->dev;
	u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
	struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;

	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);

	/* Check for SATA dev */
	context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
	if (context & (1 << phy_no))
		goto end;

	if (phy_no == 8) {
		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);

		port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
			  PORT_STATE_PHY8_PORT_NUM_OFF;
		link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
			    PORT_STATE_PHY8_CONN_RATE_OFF;
	} else {
		port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
		port_id = (port_id >> (4 * phy_no)) & 0xf;
		link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
		link_rate = (link_rate >> (phy_no * 4)) & 0xf;
	}

	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}

	for (i = 0; i < 6; i++) {
		u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
					       RX_IDAF_DWORD0 + (i * 4));
		frame_rcvd[i] = __swab32(idaf);
	}

	/* Get the linkrates */
	link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
	link_rate = (link_rate >> (phy_no * 4)) & 0xf;
	sas_phy->linkrate = link_rate;
	hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
						HARD_PHY_LINKRATE);
	phy->maximum_linkrate = hard_phy_linkrate & 0xf;
	phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;

	sas_phy->oob_mode = SAS_OOB_MODE;
	memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE);
	dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
	phy->port_id = port_id;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	phy->phy_type |= PORT_TYPE_SAS;
	phy->phy_attached = 1;
	phy->identify.device_type = id->dev_type;
	phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
	if (phy->identify.device_type == SAS_END_DEVICE)
		phy->identify.target_port_protocols =
			SAS_PROTOCOL_SSP;
	else if (phy->identify.device_type != SAS_PHY_UNUSED)
		phy->identify.target_port_protocols =
			SAS_PROTOCOL_SMP;
	queue_work(hisi_hba->wq, &phy->phyup_ws);

end:
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}

static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int res = 0;
	u32 phy_cfg, phy_state;

	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);

	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return res;
}

static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_HANDLED;

	irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
		   >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);

			if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
				/* phy up */
				if (phy_up_v2_hw(phy_no, hisi_hba)) {
					res = IRQ_NONE;
					goto end;
				}

			if (irq_value & CHL_INT0_NOT_RDY_MSK)
				/* phy down */
				if (phy_down_v2_hw(phy_no, hisi_hba)) {
					res = IRQ_NONE;
					goto end;
				}
		}
		irq_msk >>= 1;
		phy_no++;
	}

end:
	return res;
}

static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	unsigned long flags;

	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);

	spin_lock_irqsave(&hisi_hba->lock, flags);
	sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_RX_BCST_ACK_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
}

static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	struct device *dev = &hisi_hba->pdev->dev;
	u32 ent_msk, ent_tmp, irq_msk;
	int phy_no = 0;

	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	ent_tmp = ent_msk;
	ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);

	irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >>
		   HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;

	while (irq_msk) {
		if (irq_msk & (1 << phy_no)) {
			u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
							     CHL_INT0);
							     phy_no,
							     CHL_INT0);
			u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
							     CHL_INT1);
			u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
							     CHL_INT2);

			if (irq_value1) {
				if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
						  CHL_INT1_DMAC_TX_ECC_ERR_MSK))
					panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
					      dev_name(dev), irq_value1);

				hisi_sas_phy_write32(hisi_hba, phy_no,
						     CHL_INT1, irq_value1);
			}

			if (irq_value2)
				hisi_sas_phy_write32(hisi_hba, phy_no,
						     CHL_INT2, irq_value2);

			if (irq_value0) {
				if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
					phy_bcast_v2_hw(phy_no, hisi_hba);

				hisi_sas_phy_write32(hisi_hba, phy_no,
						     CHL_INT0, irq_value0
						     & (~CHL_INT0_HOTPLUG_TOUT_MSK)
						     & (~CHL_INT0_SL_PHY_ENABLE_MSK)
						     & (~CHL_INT0_NOT_RDY_MSK));
			}
		}
		irq_msk &= ~(1 << phy_no);
		phy_no++;
	}

	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);

	return IRQ_HANDLED;
}

static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_itct *itct;
	struct hisi_sas_complete_v2_hdr *complete_queue;
	u32 irq_value, rd_point, wr_point, dev_id;
	int queue = cq->id;

	complete_queue = hisi_hba->complete_hdr[queue];
	irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);

	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);

	rd_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_RD_PTR +
				   (0x14 * queue));
	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v2_hdr *complete_hdr;
		int iptt;

		complete_hdr = &complete_queue[rd_point];

		/* Check for NCQ completion */
		if (complete_hdr->act) {
			u32 act_tmp = complete_hdr->act;
			int ncq_tag_count = ffs(act_tmp);

			dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
				 CMPLT_HDR_DEV_ID_OFF;
			itct = &hisi_hba->itct[dev_id];

			/* The NCQ tags are held in the itct header */
			while (ncq_tag_count) {
				__le64 *ncq_tag = &itct->qw4_15[0];

				ncq_tag_count -= 1;
				iptt = (ncq_tag[ncq_tag_count / 5]
					>> (ncq_tag_count % 5) * 12) & 0xfff;

				slot = &hisi_hba->slot_info[iptt];
				slot->cmplt_queue_slot = rd_point;
				slot->cmplt_queue = queue;
				slot_complete_v2_hw(hisi_hba, slot, 0);

				act_tmp &= ~(1 << ncq_tag_count);
				ncq_tag_count = ffs(act_tmp);
			}
		} else {
			iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
			slot = &hisi_hba->slot_info[iptt];
			slot->cmplt_queue_slot = rd_point;
			slot->cmplt_queue = queue;
			slot_complete_v2_hw(hisi_hba, slot, 0);
		}

		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
	return IRQ_HANDLED;
}

static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
{
	struct hisi_sas_phy *phy = p;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = &hisi_hba->pdev->dev;
	struct hisi_sas_initial_fis *initial_fis;
	struct dev_to_host_fis *fis;
	u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
	irqreturn_t res = IRQ_HANDLED;
	u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
	int phy_no;

	phy_no = sas_phy->id;
	initial_fis = &hisi_hba->initial_fis[phy_no];
	fis = &initial_fis->fis;

	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk | 1 << phy_no);

	ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1);
	ent_tmp = ent_int;
	ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4);
	if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) {
		dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no);
		hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
		hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
		res = IRQ_NONE;
		goto end;
	}

	if (unlikely(phy_no == 8)) {
		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);

		port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
			  PORT_STATE_PHY8_PORT_NUM_OFF;
		link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
			    PORT_STATE_PHY8_CONN_RATE_OFF;
	} else {
		port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
		port_id = (port_id >> (4 * phy_no)) & 0xf;
		link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
		link_rate = (link_rate >> (phy_no * 4)) & 0xf;
	}

	if (port_id == 0xf) {
		dev_err(dev, "sata int: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}

	sas_phy->linkrate = link_rate;
	hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
						HARD_PHY_LINKRATE);
	phy->maximum_linkrate = hard_phy_linkrate & 0xf;
	phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;

	sas_phy->oob_mode = SATA_OOB_MODE;
	/* Make up some unique SAS address */
	attached_sas_addr[0] = 0x50;
	attached_sas_addr[7] = phy_no;
	memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
	memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
	dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate);
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	phy->port_id = port_id;
	phy->phy_type |= PORT_TYPE_SATA;
	phy->phy_attached = 1;
	phy->identify.device_type = SAS_SATA_DEV;
	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	queue_work(hisi_hba->wq, &phy->phyup_ws);

end:
	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);

	return res;
}

static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
	int_phy_updown_v2_hw,
	int_chnl_int_v2_hw,
};

/**
 * There is a limitation in the hip06 chipset that we need
 * to map in all mbigen interrupts, even if they are not used.
 */
static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
	struct platform_device *pdev = hisi_hba->pdev;
	struct device *dev = &pdev->dev;
	int i, irq, rc, irq_map[128];

	for (i = 0; i < 128; i++)
		irq_map[i] = platform_get_irq(pdev, i);

	for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
		int idx = i;

		irq = irq_map[idx + 1]; /* Phy up/down is irq1 */
		if (!irq) {
			dev_err(dev, "irq init: fail map phy interrupt %d\n",
				idx);
			return -ENOENT;
		}

		rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
				      DRV_NAME " phy", hisi_hba);
		if (rc) {
			dev_err(dev, "irq init: could not request "
				"phy interrupt %d, rc=%d\n",
				irq, rc);
			return -ENOENT;
		}
	}

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
		int idx = i + 72; /* First SATA interrupt is irq72 */

		irq = irq_map[idx];
		if (!irq) {
			dev_err(dev, "irq init: fail map phy interrupt %d\n",
				idx);
			return -ENOENT;
		}

		rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
				      DRV_NAME " sata", phy);
		if (rc) {
			dev_err(dev, "irq init: could not request "
				"sata interrupt %d, rc=%d\n",
				irq, rc);
			return -ENOENT;
		}
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		int idx = i + 96; /* First cq interrupt is irq96 */

		irq = irq_map[idx];
		if (!irq) {
			dev_err(dev,
				"irq init: could not map cq interrupt %d\n",
				idx);
			return -ENOENT;
		}
		rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
				      DRV_NAME " cq", &hisi_hba->cq[i]);
		if (rc) {
			dev_err(dev,
				"irq init: could not request cq interrupt %d, rc=%d\n",
				irq, rc);
			return -ENOENT;
		}
	}

	return 0;
}

static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v2_hw(hisi_hba);
	if (rc)
		return rc;

	rc = interrupt_init_v2_hw(hisi_hba);
	if (rc)
		return rc;

	phys_init_v2_hw(hisi_hba);

	return 0;
}

static const struct hisi_sas_hw hisi_sas_v2_hw = {
	.hw_init = hisi_sas_v2_init,
	.setup_itct = setup_itct_v2_hw,
	.sl_notify = sl_notify_v2_hw,
	.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
	.free_device = free_device_v2_hw,
	.prep_smp = prep_smp_v2_hw,
	.prep_ssp = prep_ssp_v2_hw,
	.prep_stp = prep_ata_v2_hw,
	.get_free_slot = get_free_slot_v2_hw,
	.start_delivery = start_delivery_v2_hw,
	.slot_complete = slot_complete_v2_hw,
	.phy_enable = enable_phy_v2_hw,
	.phy_disable = disable_phy_v2_hw,
	.phy_hard_reset = phy_hard_reset_v2_hw,
	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
};

static int hisi_sas_v2_probe(struct platform_device *pdev)
{
	return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
}

static int hisi_sas_v2_remove(struct platform_device *pdev)
{
	return hisi_sas_remove(pdev);
}

static const struct of_device_id sas_v2_of_match[] = {
	{ .compatible = "hisilicon,hip06-sas-v2",},
	{},
};
MODULE_DEVICE_TABLE(of, sas_v2_of_match);

static struct platform_driver hisi_sas_v2_driver = {
	.probe = hisi_sas_v2_probe,
	.remove = hisi_sas_v2_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sas_v2_of_match,
	},
};

module_platform_driver(hisi_sas_v2_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver");
MODULE_ALIAS("platform:" DRV_NAME);