1 /* 2 * Copyright (c) 2017 Hisilicon Limited. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 */ 10 11 #include "hisi_sas.h" 12 #define DRV_NAME "hisi_sas_v3_hw" 13 14 /* global registers need init*/ 15 #define DLVRY_QUEUE_ENABLE 0x0 16 #define IOST_BASE_ADDR_LO 0x8 17 #define IOST_BASE_ADDR_HI 0xc 18 #define ITCT_BASE_ADDR_LO 0x10 19 #define ITCT_BASE_ADDR_HI 0x14 20 #define IO_BROKEN_MSG_ADDR_LO 0x18 21 #define IO_BROKEN_MSG_ADDR_HI 0x1c 22 #define PHY_CONTEXT 0x20 23 #define PHY_STATE 0x24 24 #define PHY_PORT_NUM_MA 0x28 25 #define PHY_CONN_RATE 0x30 26 #define ITCT_CLR 0x44 27 #define ITCT_CLR_EN_OFF 16 28 #define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF) 29 #define ITCT_DEV_OFF 0 30 #define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF) 31 #define IO_SATA_BROKEN_MSG_ADDR_LO 0x58 32 #define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c 33 #define SATA_INITI_D2H_STORE_ADDR_LO 0x60 34 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64 35 #define CFG_MAX_TAG 0x68 36 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 37 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 38 #define HGC_GET_ITV_TIME 0x90 39 #define DEVICE_MSG_WORK_MODE 0x94 40 #define OPENA_WT_CONTI_TIME 0x9c 41 #define I_T_NEXUS_LOSS_TIME 0xa0 42 #define MAX_CON_TIME_LIMIT_TIME 0xa4 43 #define BUS_INACTIVE_LIMIT_TIME 0xa8 44 #define REJECT_TO_OPEN_LIMIT_TIME 0xac 45 #define CFG_AGING_TIME 0xbc 46 #define HGC_DFX_CFG2 0xc0 47 #define CFG_ABT_SET_QUERY_IPTT 0xd4 48 #define CFG_SET_ABORTED_IPTT_OFF 0 49 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) 50 #define CFG_SET_ABORTED_EN_OFF 12 51 #define CFG_ABT_SET_IPTT_DONE 0xd8 52 #define CFG_ABT_SET_IPTT_DONE_OFF 0 53 #define HGC_IOMB_PROC1_STATUS 0x104 54 #define CFG_1US_TIMER_TRSH 0xcc 55 #define CHNL_INT_STATUS 0x148 56 #define HGC_AXI_FIFO_ERR_INFO 0x154 57 #define 
AXI_ERR_INFO_OFF 0 58 #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) 59 #define FIFO_ERR_INFO_OFF 8 60 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF) 61 #define INT_COAL_EN 0x19c 62 #define OQ_INT_COAL_TIME 0x1a0 63 #define OQ_INT_COAL_CNT 0x1a4 64 #define ENT_INT_COAL_TIME 0x1a8 65 #define ENT_INT_COAL_CNT 0x1ac 66 #define OQ_INT_SRC 0x1b0 67 #define OQ_INT_SRC_MSK 0x1b4 68 #define ENT_INT_SRC1 0x1b8 69 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0 70 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF) 71 #define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8 72 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF) 73 #define ENT_INT_SRC2 0x1bc 74 #define ENT_INT_SRC3 0x1c0 75 #define ENT_INT_SRC3_WP_DEPTH_OFF 8 76 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9 77 #define ENT_INT_SRC3_RP_DEPTH_OFF 10 78 #define ENT_INT_SRC3_AXI_OFF 11 79 #define ENT_INT_SRC3_FIFO_OFF 12 80 #define ENT_INT_SRC3_LM_OFF 14 81 #define ENT_INT_SRC3_ITC_INT_OFF 15 82 #define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF) 83 #define ENT_INT_SRC3_ABT_OFF 16 84 #define ENT_INT_SRC_MSK1 0x1c4 85 #define ENT_INT_SRC_MSK2 0x1c8 86 #define ENT_INT_SRC_MSK3 0x1cc 87 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31 88 #define CHNL_PHYUPDOWN_INT_MSK 0x1d0 89 #define CHNL_ENT_INT_MSK 0x1d4 90 #define HGC_COM_INT_MSK 0x1d8 91 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF) 92 #define SAS_ECC_INTR 0x1e8 93 #define SAS_ECC_INTR_MSK 0x1ec 94 #define HGC_ERR_STAT_EN 0x238 95 #define DLVRY_Q_0_BASE_ADDR_LO 0x260 96 #define DLVRY_Q_0_BASE_ADDR_HI 0x264 97 #define DLVRY_Q_0_DEPTH 0x268 98 #define DLVRY_Q_0_WR_PTR 0x26c 99 #define DLVRY_Q_0_RD_PTR 0x270 100 #define HYPER_STREAM_ID_EN_CFG 0xc80 101 #define OQ0_INT_SRC_MSK 0xc90 102 #define COMPL_Q_0_BASE_ADDR_LO 0x4e0 103 #define COMPL_Q_0_BASE_ADDR_HI 0x4e4 104 #define COMPL_Q_0_DEPTH 0x4e8 105 #define COMPL_Q_0_WR_PTR 0x4ec 106 #define COMPL_Q_0_RD_PTR 0x4f0 107 #define AWQOS_AWCACHE_CFG 
0xc84 108 #define ARQOS_ARCACHE_CFG 0xc88 109 110 /* phy registers requiring init */ 111 #define PORT_BASE (0x2000) 112 #define PHY_CFG (PORT_BASE + 0x0) 113 #define HARD_PHY_LINKRATE (PORT_BASE + 0x4) 114 #define PHY_CFG_ENA_OFF 0 115 #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) 116 #define PHY_CFG_DC_OPT_OFF 2 117 #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) 118 #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) 119 #define PHY_CTRL (PORT_BASE + 0x14) 120 #define PHY_CTRL_RESET_OFF 0 121 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) 122 #define SL_CFG (PORT_BASE + 0x84) 123 #define SL_CONTROL (PORT_BASE + 0x94) 124 #define SL_CONTROL_NOTIFY_EN_OFF 0 125 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) 126 #define SL_CTA_OFF 17 127 #define SL_CTA_MSK (0x1 << SL_CTA_OFF) 128 #define TX_ID_DWORD0 (PORT_BASE + 0x9c) 129 #define TX_ID_DWORD1 (PORT_BASE + 0xa0) 130 #define TX_ID_DWORD2 (PORT_BASE + 0xa4) 131 #define TX_ID_DWORD3 (PORT_BASE + 0xa8) 132 #define TX_ID_DWORD4 (PORT_BASE + 0xaC) 133 #define TX_ID_DWORD5 (PORT_BASE + 0xb0) 134 #define TX_ID_DWORD6 (PORT_BASE + 0xb4) 135 #define TXID_AUTO (PORT_BASE + 0xb8) 136 #define CT3_OFF 1 137 #define CT3_MSK (0x1 << CT3_OFF) 138 #define TX_HARDRST_OFF 2 139 #define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) 140 #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) 141 #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) 142 #define STP_LINK_TIMER (PORT_BASE + 0x120) 143 #define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124) 144 #define CON_CFG_DRIVER (PORT_BASE + 0x130) 145 #define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134) 146 #define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138) 147 #define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c) 148 #define CHL_INT0 (PORT_BASE + 0x1b4) 149 #define CHL_INT0_HOTPLUG_TOUT_OFF 0 150 #define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF) 151 #define CHL_INT0_SL_RX_BCST_ACK_OFF 1 152 #define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF) 153 #define 
CHL_INT0_SL_PHY_ENABLE_OFF 2 154 #define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF) 155 #define CHL_INT0_NOT_RDY_OFF 4 156 #define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF) 157 #define CHL_INT0_PHY_RDY_OFF 5 158 #define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF) 159 #define CHL_INT1 (PORT_BASE + 0x1b8) 160 #define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15 161 #define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) 162 #define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 163 #define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF) 164 #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19 165 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20 166 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21 167 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 168 #define CHL_INT2 (PORT_BASE + 0x1bc) 169 #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 170 #define CHL_INT2_STP_LINK_TIMEOUT_OFF 31 171 #define CHL_INT0_MSK (PORT_BASE + 0x1c0) 172 #define CHL_INT1_MSK (PORT_BASE + 0x1c4) 173 #define CHL_INT2_MSK (PORT_BASE + 0x1c8) 174 #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) 175 #define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4) 176 #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) 177 #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) 178 #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) 179 #define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc) 180 #define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0) 181 #define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4) 182 #define DMA_TX_STATUS (PORT_BASE + 0x2d0) 183 #define DMA_TX_STATUS_BUSY_OFF 0 184 #define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF) 185 #define DMA_RX_STATUS (PORT_BASE + 0x2e8) 186 #define DMA_RX_STATUS_BUSY_OFF 0 187 #define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) 188 189 #define COARSETUNE_TIME (PORT_BASE + 0x304) 190 #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380) 191 #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384) 192 #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390) 193 #define ERR_CNT_DISP_ERR (PORT_BASE + 
0x398) 194 195 #define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */ 196 #if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW) 197 #error Max ITCT exceeded 198 #endif 199 200 #define AXI_MASTER_CFG_BASE (0x5000) 201 #define AM_CTRL_GLOBAL (0x0) 202 #define AM_CURR_TRANS_RETURN (0x150) 203 204 #define AM_CFG_MAX_TRANS (0x5010) 205 #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) 206 #define AXI_CFG (0x5100) 207 #define AM_ROB_ECC_ERR_ADDR (0x510c) 208 #define AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF 0 209 #define AM_ROB_ECC_ONEBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF) 210 #define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8 211 #define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF) 212 213 /* RAS registers need init */ 214 #define RAS_BASE (0x6000) 215 #define SAS_RAS_INTR0 (RAS_BASE) 216 #define SAS_RAS_INTR1 (RAS_BASE + 0x04) 217 #define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08) 218 #define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c) 219 220 /* HW dma structures */ 221 /* Delivery queue header */ 222 /* dw0 */ 223 #define CMD_HDR_ABORT_FLAG_OFF 0 224 #define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF) 225 #define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2 226 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF) 227 #define CMD_HDR_RESP_REPORT_OFF 5 228 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) 229 #define CMD_HDR_TLR_CTRL_OFF 6 230 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) 231 #define CMD_HDR_PORT_OFF 18 232 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) 233 #define CMD_HDR_PRIORITY_OFF 27 234 #define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF) 235 #define CMD_HDR_CMD_OFF 29 236 #define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF) 237 /* dw1 */ 238 #define CMD_HDR_UNCON_CMD_OFF 3 239 #define CMD_HDR_DIR_OFF 5 240 #define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF) 241 #define CMD_HDR_RESET_OFF 7 242 #define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF) 243 #define CMD_HDR_VDTL_OFF 10 244 
#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF) 245 #define CMD_HDR_FRAME_TYPE_OFF 11 246 #define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF) 247 #define CMD_HDR_DEV_ID_OFF 16 248 #define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF) 249 /* dw2 */ 250 #define CMD_HDR_CFL_OFF 0 251 #define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF) 252 #define CMD_HDR_NCQ_TAG_OFF 10 253 #define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF) 254 #define CMD_HDR_MRFL_OFF 15 255 #define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF) 256 #define CMD_HDR_SG_MOD_OFF 24 257 #define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF) 258 /* dw3 */ 259 #define CMD_HDR_IPTT_OFF 0 260 #define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF) 261 /* dw6 */ 262 #define CMD_HDR_DIF_SGL_LEN_OFF 0 263 #define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) 264 #define CMD_HDR_DATA_SGL_LEN_OFF 16 265 #define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) 266 /* dw7 */ 267 #define CMD_HDR_ADDR_MODE_SEL_OFF 15 268 #define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF) 269 #define CMD_HDR_ABORT_IPTT_OFF 16 270 #define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF) 271 272 /* Completion header */ 273 /* dw0 */ 274 #define CMPLT_HDR_CMPLT_OFF 0 275 #define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF) 276 #define CMPLT_HDR_ERROR_PHASE_OFF 2 277 #define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF) 278 #define CMPLT_HDR_RSPNS_XFRD_OFF 10 279 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) 280 #define CMPLT_HDR_ERX_OFF 12 281 #define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) 282 #define CMPLT_HDR_ABORT_STAT_OFF 13 283 #define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF) 284 /* abort_stat */ 285 #define STAT_IO_NOT_VALID 0x1 286 #define STAT_IO_NO_DEVICE 0x2 287 #define STAT_IO_COMPLETE 0x3 288 #define STAT_IO_ABORTED 0x4 289 /* dw1 */ 290 #define CMPLT_HDR_IPTT_OFF 0 291 #define 
CMPLT_HDR_IPTT_MSK		(0xffff << CMPLT_HDR_IPTT_OFF)
#define CMPLT_HDR_DEV_ID_OFF		16
#define CMPLT_HDR_DEV_ID_MSK		(0xffff << CMPLT_HDR_DEV_ID_OFF)
/* dw3 */
#define CMPLT_HDR_IO_IN_TARGET_OFF	17
#define CMPLT_HDR_IO_IN_TARGET_MSK	(0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)

/* ITCT header */
/* qw0 */
#define ITCT_HDR_DEV_TYPE_OFF		0
#define ITCT_HDR_DEV_TYPE_MSK		(0x3 << ITCT_HDR_DEV_TYPE_OFF)
#define ITCT_HDR_VALID_OFF		2
#define ITCT_HDR_VALID_MSK		(0x1 << ITCT_HDR_VALID_OFF)
#define ITCT_HDR_MCR_OFF		5
#define ITCT_HDR_MCR_MSK		(0xf << ITCT_HDR_MCR_OFF)
#define ITCT_HDR_VLN_OFF		9
#define ITCT_HDR_VLN_MSK		(0xf << ITCT_HDR_VLN_OFF)
#define ITCT_HDR_SMP_TIMEOUT_OFF	16
#define ITCT_HDR_AWT_CONTINUE_OFF	25
#define ITCT_HDR_PORT_ID_OFF		28
#define ITCT_HDR_PORT_ID_MSK		(0xf << ITCT_HDR_PORT_ID_OFF)
/* qw2 */
#define ITCT_HDR_INLT_OFF		0
#define ITCT_HDR_INLT_MSK		(0xffffULL << ITCT_HDR_INLT_OFF)
#define ITCT_HDR_RTOLT_OFF		48
#define ITCT_HDR_RTOLT_MSK		(0xffffULL << ITCT_HDR_RTOLT_OFF)

/*
 * Completion queue entry layout for v3 HW; little-endian fields as the
 * controller writes them (consumed in the completion path).
 */
struct hisi_sas_complete_v3_hdr {
	__le32 dw0;
	__le32 dw1;
	__le32 act;
	__le32 dw3;
};

/*
 * Per-slot error record written by the HW on I/O failure; one dword/half-word
 * per failure class (transport TX/RX, DMA TX/RX, SIPC RX).
 */
struct hisi_sas_err_record_v3 {
	/* dw0 */
	__le32 trans_tx_fail_type;

	/* dw1 */
	__le32 trans_rx_fail_type;

	/* dw2 */
	__le16 dma_tx_err_type;
	__le16 sipc_rx_err_type;

	/* dw3 */
	__le32 dma_rx_err_type;
};

#define RX_DATA_LEN_UNDERFLOW_OFF	6
#define RX_DATA_LEN_UNDERFLOW_MSK	(1 << RX_DATA_LEN_UNDERFLOW_OFF)

#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
#define HISI_SAS_MSI_COUNT_V3_HW 32

/* Data-direction encodings for the cmd hdr dw1 DIR field */
#define DIR_NO_DATA 0
#define DIR_TO_INI 1
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3

/*
 * ATA commands that must be sent "unconstrained" (cmd hdr dw1 UNCON bit),
 * i.e. outside the normal NCQ/ordering constraints.
 */
#define CMD_IS_UNCONSTRAINT(cmd) \
	((cmd == ATA_CMD_READ_LOG_EXT) || \
	(cmd == ATA_CMD_READ_LOG_DMA_EXT) || \
	(cmd == ATA_CMD_DEV_RESET))

/* Read a 32-bit global register at byte offset @off */
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
357 { 358 void __iomem *regs = hisi_hba->regs + off; 359 360 return readl(regs); 361 } 362 363 static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) 364 { 365 void __iomem *regs = hisi_hba->regs + off; 366 367 return readl_relaxed(regs); 368 } 369 370 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) 371 { 372 void __iomem *regs = hisi_hba->regs + off; 373 374 writel(val, regs); 375 } 376 377 static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no, 378 u32 off, u32 val) 379 { 380 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; 381 382 writel(val, regs); 383 } 384 385 static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, 386 int phy_no, u32 off) 387 { 388 void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off; 389 390 return readl(regs); 391 } 392 393 static void init_reg_v3_hw(struct hisi_hba *hisi_hba) 394 { 395 int i; 396 397 /* Global registers init */ 398 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 399 (u32)((1ULL << hisi_hba->queue_count) - 1)); 400 hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400); 401 hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); 402 hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd); 403 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); 404 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); 405 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); 406 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); 407 hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); 408 hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); 409 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); 410 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe); 411 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe); 412 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff); 413 hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); 414 hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0); 415 hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0); 416 hisi_sas_write32(hisi_hba, 
SAS_ECC_INTR_MSK, 0x0); 417 hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0); 418 hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0); 419 for (i = 0; i < hisi_hba->queue_count; i++) 420 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0); 421 422 hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); 423 424 for (i = 0; i < hisi_hba->n_phy; i++) { 425 hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855); 426 hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80); 427 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); 428 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); 429 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); 430 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); 431 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff); 432 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe); 433 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); 434 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); 435 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); 436 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0); 437 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); 438 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); 439 hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); 440 441 /* used for 12G negotiate */ 442 hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); 443 } 444 445 for (i = 0; i < hisi_hba->queue_count; i++) { 446 /* Delivery queue */ 447 hisi_sas_write32(hisi_hba, 448 DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14), 449 upper_32_bits(hisi_hba->cmd_hdr_dma[i])); 450 451 hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14), 452 lower_32_bits(hisi_hba->cmd_hdr_dma[i])); 453 454 hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14), 455 HISI_SAS_QUEUE_SLOTS); 456 457 /* Completion queue */ 458 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14), 459 upper_32_bits(hisi_hba->complete_hdr_dma[i])); 460 461 
hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14), 462 lower_32_bits(hisi_hba->complete_hdr_dma[i])); 463 464 hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14), 465 HISI_SAS_QUEUE_SLOTS); 466 } 467 468 /* itct */ 469 hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO, 470 lower_32_bits(hisi_hba->itct_dma)); 471 472 hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI, 473 upper_32_bits(hisi_hba->itct_dma)); 474 475 /* iost */ 476 hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO, 477 lower_32_bits(hisi_hba->iost_dma)); 478 479 hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI, 480 upper_32_bits(hisi_hba->iost_dma)); 481 482 /* breakpoint */ 483 hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO, 484 lower_32_bits(hisi_hba->breakpoint_dma)); 485 486 hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI, 487 upper_32_bits(hisi_hba->breakpoint_dma)); 488 489 /* SATA broken msg */ 490 hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO, 491 lower_32_bits(hisi_hba->sata_breakpoint_dma)); 492 493 hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI, 494 upper_32_bits(hisi_hba->sata_breakpoint_dma)); 495 496 /* SATA initial fis */ 497 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO, 498 lower_32_bits(hisi_hba->initial_fis_dma)); 499 500 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, 501 upper_32_bits(hisi_hba->initial_fis_dma)); 502 503 /* RAS registers init */ 504 hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0); 505 hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0); 506 } 507 508 static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 509 { 510 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 511 512 cfg &= ~PHY_CFG_DC_OPT_MSK; 513 cfg |= 1 << PHY_CFG_DC_OPT_OFF; 514 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 515 } 516 517 static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 518 { 519 struct sas_identify_frame identify_frame; 520 u32 *identify_buffer; 521 522 memset(&identify_frame, 0, 
sizeof(identify_frame)); 523 identify_frame.dev_type = SAS_END_DEVICE; 524 identify_frame.frame_type = 0; 525 identify_frame._un1 = 1; 526 identify_frame.initiator_bits = SAS_PROTOCOL_ALL; 527 identify_frame.target_bits = SAS_PROTOCOL_NONE; 528 memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); 529 memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE); 530 identify_frame.phy_id = phy_no; 531 identify_buffer = (u32 *)(&identify_frame); 532 533 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, 534 __swab32(identify_buffer[0])); 535 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, 536 __swab32(identify_buffer[1])); 537 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, 538 __swab32(identify_buffer[2])); 539 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, 540 __swab32(identify_buffer[3])); 541 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, 542 __swab32(identify_buffer[4])); 543 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, 544 __swab32(identify_buffer[5])); 545 } 546 547 static void setup_itct_v3_hw(struct hisi_hba *hisi_hba, 548 struct hisi_sas_device *sas_dev) 549 { 550 struct domain_device *device = sas_dev->sas_device; 551 struct device *dev = hisi_hba->dev; 552 u64 qw0, device_id = sas_dev->device_id; 553 struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; 554 struct domain_device *parent_dev = device->parent; 555 struct asd_sas_port *sas_port = device->port; 556 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 557 558 memset(itct, 0, sizeof(*itct)); 559 560 /* qw0 */ 561 qw0 = 0; 562 switch (sas_dev->dev_type) { 563 case SAS_END_DEVICE: 564 case SAS_EDGE_EXPANDER_DEVICE: 565 case SAS_FANOUT_EXPANDER_DEVICE: 566 qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; 567 break; 568 case SAS_SATA_DEV: 569 case SAS_SATA_PENDING: 570 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) 571 qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; 572 else 573 qw0 = HISI_SAS_DEV_TYPE_SATA << 
ITCT_HDR_DEV_TYPE_OFF; 574 break; 575 default: 576 dev_warn(dev, "setup itct: unsupported dev type (%d)\n", 577 sas_dev->dev_type); 578 } 579 580 qw0 |= ((1 << ITCT_HDR_VALID_OFF) | 581 (device->linkrate << ITCT_HDR_MCR_OFF) | 582 (1 << ITCT_HDR_VLN_OFF) | 583 (0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) | 584 (1 << ITCT_HDR_AWT_CONTINUE_OFF) | 585 (port->id << ITCT_HDR_PORT_ID_OFF)); 586 itct->qw0 = cpu_to_le64(qw0); 587 588 /* qw1 */ 589 memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE); 590 itct->sas_addr = __swab64(itct->sas_addr); 591 592 /* qw2 */ 593 if (!dev_is_sata(device)) 594 itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) | 595 (0x1ULL << ITCT_HDR_RTOLT_OFF)); 596 } 597 598 static void clear_itct_v3_hw(struct hisi_hba *hisi_hba, 599 struct hisi_sas_device *sas_dev) 600 { 601 DECLARE_COMPLETION_ONSTACK(completion); 602 u64 dev_id = sas_dev->device_id; 603 struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; 604 u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 605 606 sas_dev->completion = &completion; 607 608 /* clear the itct interrupt state */ 609 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) 610 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 611 ENT_INT_SRC3_ITC_INT_MSK); 612 613 /* clear the itct table*/ 614 reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); 615 hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); 616 617 wait_for_completion(sas_dev->completion); 618 memset(itct, 0, sizeof(struct hisi_sas_itct)); 619 } 620 621 static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, 622 struct domain_device *device) 623 { 624 struct hisi_sas_slot *slot, *slot2; 625 struct hisi_sas_device *sas_dev = device->lldd_dev; 626 u32 cfg_abt_set_query_iptt; 627 628 cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba, 629 CFG_ABT_SET_QUERY_IPTT); 630 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) { 631 cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK; 632 cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) | 633 (slot->idx << 
CFG_SET_ABORTED_IPTT_OFF); 634 hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, 635 cfg_abt_set_query_iptt); 636 } 637 cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF); 638 hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT, 639 cfg_abt_set_query_iptt); 640 hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE, 641 1 << CFG_ABT_SET_IPTT_DONE_OFF); 642 } 643 644 static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) 645 { 646 struct device *dev = hisi_hba->dev; 647 int ret; 648 u32 val; 649 650 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); 651 652 /* Disable all of the PHYs */ 653 hisi_sas_stop_phys(hisi_hba); 654 udelay(50); 655 656 /* Ensure axi bus idle */ 657 ret = readl_poll_timeout(hisi_hba->regs + AXI_CFG, val, !val, 658 20000, 1000000); 659 if (ret) { 660 dev_err(dev, "axi bus is not idle, ret = %d!\n", ret); 661 return -EIO; 662 } 663 664 if (ACPI_HANDLE(dev)) { 665 acpi_status s; 666 667 s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); 668 if (ACPI_FAILURE(s)) { 669 dev_err(dev, "Reset failed\n"); 670 return -EIO; 671 } 672 } else { 673 dev_err(dev, "no reset method!\n"); 674 return -EINVAL; 675 } 676 677 return 0; 678 } 679 680 static int hw_init_v3_hw(struct hisi_hba *hisi_hba) 681 { 682 struct device *dev = hisi_hba->dev; 683 int rc; 684 685 rc = reset_hw_v3_hw(hisi_hba); 686 if (rc) { 687 dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc); 688 return rc; 689 } 690 691 msleep(100); 692 init_reg_v3_hw(hisi_hba); 693 694 return 0; 695 } 696 697 static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 698 { 699 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 700 701 cfg |= PHY_CFG_ENA_MSK; 702 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 703 } 704 705 static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 706 { 707 u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); 708 709 cfg &= ~PHY_CFG_ENA_MSK; 710 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); 711 } 712 713 static void 
start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 714 { 715 config_id_frame_v3_hw(hisi_hba, phy_no); 716 config_phy_opt_mode_v3_hw(hisi_hba, phy_no); 717 enable_phy_v3_hw(hisi_hba, phy_no); 718 } 719 720 static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 721 { 722 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 723 u32 txid_auto; 724 725 disable_phy_v3_hw(hisi_hba, phy_no); 726 if (phy->identify.device_type == SAS_END_DEVICE) { 727 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); 728 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, 729 txid_auto | TX_HARDRST_MSK); 730 } 731 msleep(100); 732 start_phy_v3_hw(hisi_hba, phy_no); 733 } 734 735 static enum sas_linkrate phy_get_max_linkrate_v3_hw(void) 736 { 737 return SAS_LINK_RATE_12_0_GBPS; 738 } 739 740 static void phys_init_v3_hw(struct hisi_hba *hisi_hba) 741 { 742 int i; 743 744 for (i = 0; i < hisi_hba->n_phy; i++) { 745 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 746 struct asd_sas_phy *sas_phy = &phy->sas_phy; 747 748 if (!sas_phy->phy->enabled) 749 continue; 750 751 start_phy_v3_hw(hisi_hba, i); 752 } 753 } 754 755 static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 756 { 757 u32 sl_control; 758 759 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); 760 sl_control |= SL_CONTROL_NOTIFY_EN_MSK; 761 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); 762 msleep(1); 763 sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); 764 sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK; 765 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control); 766 } 767 768 static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id) 769 { 770 int i, bitmap = 0; 771 u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); 772 u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); 773 774 for (i = 0; i < hisi_hba->n_phy; i++) 775 if (phy_state & BIT(i)) 776 if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) 777 bitmap |= 
BIT(i); 778 779 return bitmap; 780 } 781 782 /** 783 * The callpath to this function and upto writing the write 784 * queue pointer should be safe from interruption. 785 */ 786 static int 787 get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) 788 { 789 struct device *dev = hisi_hba->dev; 790 int queue = dq->id; 791 u32 r, w; 792 793 w = dq->wr_point; 794 r = hisi_sas_read32_relaxed(hisi_hba, 795 DLVRY_Q_0_RD_PTR + (queue * 0x14)); 796 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { 797 dev_warn(dev, "full queue=%d r=%d w=%d\n\n", 798 queue, r, w); 799 return -EAGAIN; 800 } 801 802 return 0; 803 } 804 805 static void start_delivery_v3_hw(struct hisi_sas_dq *dq) 806 { 807 struct hisi_hba *hisi_hba = dq->hisi_hba; 808 int dlvry_queue = dq->slot_prep->dlvry_queue; 809 int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot; 810 811 dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS; 812 hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), 813 dq->wr_point); 814 } 815 816 static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, 817 struct hisi_sas_slot *slot, 818 struct hisi_sas_cmd_hdr *hdr, 819 struct scatterlist *scatter, 820 int n_elem) 821 { 822 struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot); 823 struct device *dev = hisi_hba->dev; 824 struct scatterlist *sg; 825 int i; 826 827 if (n_elem > HISI_SAS_SGE_PAGE_CNT) { 828 dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", 829 n_elem); 830 return -EINVAL; 831 } 832 833 for_each_sg(scatter, sg, n_elem, i) { 834 struct hisi_sas_sge *entry = &sge_page->sge[i]; 835 836 entry->addr = cpu_to_le64(sg_dma_address(sg)); 837 entry->page_ctrl_0 = entry->page_ctrl_1 = 0; 838 entry->data_len = cpu_to_le32(sg_dma_len(sg)); 839 entry->data_off = 0; 840 } 841 842 hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); 843 844 hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); 845 846 return 0; 847 } 848 849 static int prep_ssp_v3_hw(struct hisi_hba 
*hisi_hba, 850 struct hisi_sas_slot *slot, int is_tmf, 851 struct hisi_sas_tmf_task *tmf) 852 { 853 struct sas_task *task = slot->task; 854 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 855 struct domain_device *device = task->dev; 856 struct hisi_sas_device *sas_dev = device->lldd_dev; 857 struct hisi_sas_port *port = slot->port; 858 struct sas_ssp_task *ssp_task = &task->ssp_task; 859 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 860 int has_data = 0, rc, priority = is_tmf; 861 u8 *buf_cmd; 862 u32 dw1 = 0, dw2 = 0; 863 864 hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | 865 (2 << CMD_HDR_TLR_CTRL_OFF) | 866 (port->id << CMD_HDR_PORT_OFF) | 867 (priority << CMD_HDR_PRIORITY_OFF) | 868 (1 << CMD_HDR_CMD_OFF)); /* ssp */ 869 870 dw1 = 1 << CMD_HDR_VDTL_OFF; 871 if (is_tmf) { 872 dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; 873 dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; 874 } else { 875 dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; 876 switch (scsi_cmnd->sc_data_direction) { 877 case DMA_TO_DEVICE: 878 has_data = 1; 879 dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF; 880 break; 881 case DMA_FROM_DEVICE: 882 has_data = 1; 883 dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF; 884 break; 885 default: 886 dw1 &= ~CMD_HDR_DIR_MSK; 887 } 888 } 889 890 /* map itct entry */ 891 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; 892 hdr->dw1 = cpu_to_le32(dw1); 893 894 dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) 895 + 3) / 4) << CMD_HDR_CFL_OFF) | 896 ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | 897 (2 << CMD_HDR_SG_MOD_OFF); 898 hdr->dw2 = cpu_to_le32(dw2); 899 hdr->transfer_tags = cpu_to_le32(slot->idx); 900 901 if (has_data) { 902 rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, 903 slot->n_elem); 904 if (rc) 905 return rc; 906 } 907 908 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); 909 hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); 910 hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); 911 912 buf_cmd = 
		  hisi_sas_cmd_hdr_addr_mem(slot) +
		sizeof(struct ssp_frame_hdr);

	/* First 8 bytes of the command IU carry the LUN */
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	if (!is_tmf) {
		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			/* Tag of the task to be managed, big-endian */
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}

	return 0;
}

/*
 * Build the delivery-queue command header for an SMP task and DMA-map
 * the request/response buffers.  Returns 0 on success or a negative
 * errno; both buffers are unmapped again on the error paths.
 */
static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct domain_device *device = task->dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port = slot->port;
	struct scatterlist *sg_req, *sg_resp;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	dma_addr_t req_dma_addr;
	unsigned int req_len, resp_len;
	int elem, rc;

	/*
	 * DMA-map SMP request, response buffers
	 */
	/* req */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);
	req_dma_addr = sg_dma_address(sg_req);

	/* resp */
	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out_req;
	}
	resp_len = sg_dma_len(sg_resp);
	/* Hardware requires dword-aligned request/response lengths */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_resp;
	}

	/* create header */
	/* dw0 */
	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
			       (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
			       (2 << CMD_HDR_CMD_OFF)); /* smp */

	/* map itct entry */
	hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
			       (1 << CMD_HDR_FRAME_TYPE_OFF) |
			       (DIR_NO_DATA << CMD_HDR_DIR_OFF));

	/* dw2: frame length fields are in dwords, minus the CRC dword */
	hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
			       (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
			       CMD_HDR_MRFL_OFF));

	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	return 0;

err_out_resp:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out_req:
	dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}

/*
 * Build the delivery-queue command header for an ATA/STP task.
 */
static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
			  struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct domain_device *device = task->dev;
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	u8 *buf_cmd;
	int has_data = 0, rc = 0, hdr_tag = 0;
	u32 dw1 = 0, dw2 = 0;

	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
	/* Command type: 3 = STP (behind expander), 4 = direct-attached SATA */
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
	else
		hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);

	switch (task->data_dir) {
	case DMA_TO_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
		break;
	case DMA_FROM_DEVICE:
		has_data = 1;
		dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
		break;
	default:
		dw1 &= ~CMD_HDR_DIR_MSK;
	}

	/* Flag a soft-reset request so the hardware issues SRST */
	if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
	    (task->ata_task.fis.control & ATA_SRST))
		dw1 |= 1 << CMD_HDR_RESET_OFF;

	dw1 |= (hisi_sas_get_ata_protocol(
		&task->ata_task.fis, task->data_dir))
		<< CMD_HDR_FRAME_TYPE_OFF;
	dw1
	    |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;

	if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command))
		dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;

	hdr->dw1 = cpu_to_le32(dw1);

	/* dw2 */
	if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
		/* NCQ tag is carried in bits 7:3 of the FIS sector count */
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
	}

	dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
	       2 << CMD_HDR_SG_MOD_OFF;
	hdr->dw2 = cpu_to_le32(dw2);

	/* dw3 */
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	if (has_data) {
		rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
					slot->n_elem);
		if (rc)
			return rc;
	}

	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

	buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));

	return 0;
}

/*
 * Build the command header for an internal abort.  abort_flag selects
 * whether a single command (identified by tag_to_abort) or all commands
 * of the device are aborted.
 */
static int prep_abort_v3_hw(struct hisi_hba *hisi_hba,
			    struct hisi_sas_slot *slot,
			    int device_id, int abort_flag, int tag_to_abort)
{
	struct sas_task *task = slot->task;
	struct domain_device *dev = task->dev;
	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
	struct hisi_sas_port *port = slot->port;

	/* dw0 */
	hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
			       (port->id << CMD_HDR_PORT_OFF) |
			       (dev_is_sata(dev)
				<< CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
			       (abort_flag
				<< CMD_HDR_ABORT_FLAG_OFF));

	/* dw1 */
	hdr->dw1 = cpu_to_le32(device_id
			       << CMD_HDR_DEV_ID_OFF);

	/* dw7 */
	hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
	hdr->transfer_tags = cpu_to_le32(slot->idx);

	return 0;
}

/*
 * Handle a phy-up event: read the negotiated port id and link rate,
 * capture the received identify frame (SAS) or initial D2H FIS (SATA),
 * then notify libsas via HISI_PHYE_PHY_UP.
 */
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i, res;
	u32 context, port_id, link_rate;
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;

	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);

	/* Each phy owns a 4-bit nibble in these global registers */
	port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
	port_id = (port_id >> (4 * phy_no)) & 0xf;
	link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
	link_rate = (link_rate >> (phy_no * 4)) & 0xf;

	if (port_id == 0xf) {
		dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
		res = IRQ_NONE;
		goto end;
	}
	sas_phy->linkrate = link_rate;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);

	/* Check for SATA dev */
	context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
	if (context & (1 << phy_no)) {
		struct hisi_sas_initial_fis *initial_fis;
		struct dev_to_host_fis *fis;
		u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};

		dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
		initial_fis = &hisi_hba->initial_fis[phy_no];
		fis = &initial_fis->fis;
		sas_phy->oob_mode = SATA_OOB_MODE;
		/* SATA devices carry no SAS address; fabricate one per phy */
		attached_sas_addr[0] = 0x50;
		attached_sas_addr[7] = phy_no;
		memcpy(sas_phy->attached_sas_addr,
		       attached_sas_addr,
		       SAS_ADDR_SIZE);
		memcpy(sas_phy->frame_rcvd, fis,
		       sizeof(struct dev_to_host_fis));
		phy->phy_type |= PORT_TYPE_SATA;
		phy->identify.device_type = SAS_SATA_DEV;
		phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
		phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	} else {
		u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
		struct sas_identify_frame *id =
			(struct sas_identify_frame *)frame_rcvd;

		dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
		/* Identify address frame arrives byte-swapped per dword */
		for (i = 0; i < 6; i++) {
			u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
						       RX_IDAF_DWORD0 + (i * 4));
			frame_rcvd[i] = __swab32(idaf);
		}
		sas_phy->oob_mode = SAS_OOB_MODE;
		memcpy(sas_phy->attached_sas_addr,
		       &id->sas_addr,
		       SAS_ADDR_SIZE);
		phy->phy_type |= PORT_TYPE_SAS;
		phy->identify.device_type = id->dev_type;
		phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
		if (phy->identify.device_type == SAS_END_DEVICE)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SSP;
		else if (phy->identify.device_type != SAS_PHY_UNUSED)
			phy->identify.target_port_protocols =
				SAS_PROTOCOL_SMP;
	}

	phy->port_id = port_id;
	phy->phy_attached = 1;
	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
	res = IRQ_HANDLED;
end:
	/* Ack the phy-enable interrupt and unmask it again */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_PHY_ENABLE_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);

	return res;
}

/* Handle a phy-down event and tell libsas the link was lost. */
static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	u32 phy_state, sl_ctrl, txid_auto;
	struct device *dev = hisi_hba->dev;

	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);

	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
	dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ?
			  1 : 0);

	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
			     sl_ctrl&(~SL_CTA_MSK));

	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
			     txid_auto | CT3_MSK);

	/* Ack the not-ready interrupt and unmask it again */
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);

	return IRQ_HANDLED;
}

/* Broadcast primitive received: kick libsas to re-discover the domain. */
static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
	sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
			     CHL_INT0_SL_RX_BCST_ACK_MSK);
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);

	return IRQ_HANDLED;
}

/*
 * Shared ISR for phy up/down/broadcast events.  Bit 0 of each phy's
 * nibble in CHNL_INT_STATUS signals a pending CHL_INT0 event; the
 * current PHY_STATE bit decides which handler is appropriate.
 */
static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	u32 irq_msk;
	int phy_no = 0;
	irqreturn_t res = IRQ_NONE;

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
		  & 0x11111111;
	while (irq_msk) {
		if (irq_msk & 1) {
			u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
							    CHL_INT0);
			u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
			int rdy = phy_state & (1 << phy_no);

			if (rdy) {
				if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
					/* phy up */
					if (phy_up_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
				if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
					/* phy bcast */
					if (phy_bcast_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
			} else {
				if (irq_value & CHL_INT0_NOT_RDY_MSK)
					/* phy down */
					if (phy_down_v3_hw(phy_no, hisi_hba)
					    == IRQ_HANDLED)
						res = IRQ_HANDLED;
			}
		}
		irq_msk >>= 4;
		phy_no++;
	}

	return res;
}

/* Per-phy DMA AXI bus errors reported through CHL_INT1 */
static const struct hisi_sas_hw_error port_axi_error[] = {
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
		.msg = "dma_tx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
		.msg = "dma_tx_axi_rd_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
		.msg = "dma_rx_axi_wr_err",
	},
	{
		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
		.msg = "dma_rx_axi_rd_err",
	},
};

/*
 * Shared ISR for channel interrupts (CHL_INT0/1/2) on all phys; bits
 * 1-3 of each phy's nibble in CHNL_INT_STATUS select the source.
 */
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	u32 ent_msk, ent_tmp, irq_msk;
	int phy_no = 0;

	/* Mask ENT interrupt while processing; restored at the end */
	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	ent_tmp = ent_msk;
	ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);

	irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
		  & 0xeeeeeeee;

	while (irq_msk) {
		u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT0);
		u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT1);
		u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
						     CHL_INT2);

		if ((irq_msk & (4 << (phy_no * 4))) &&
		    irq_value1) {
			int i;

			for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
				const struct hisi_sas_hw_error *error =
					&port_axi_error[i];

				if (!(irq_value1 & error->irq_msk))
					continue;

				/* AXI errors trigger a full controller reset */
				dev_err(dev, "%s error (phy%d 0x%x) found!\n",
					error->msg, phy_no, irq_value1);
				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
			}

			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT1, irq_value1);
		}

		if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
			struct hisi_sas_phy *phy =
				&hisi_hba->phy[phy_no];

			if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
				dev_warn(dev, "phy%d identify timeout\n",
					 phy_no);
				hisi_sas_notify_phy_event(phy,
							  HISI_PHYE_LINK_RESET);

			}

			if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
				u32 reg_value = hisi_sas_phy_read32(hisi_hba,
						phy_no, STP_LINK_TIMEOUT_STATE);

				dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
					 phy_no, reg_value);
				if (reg_value & BIT(4))
					hisi_sas_notify_phy_event(phy,
							HISI_PHYE_LINK_RESET);
			}

			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT2, irq_value2);
		}


		if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
			/*
			 * Ack CHL_INT0 sources except those owned by the
			 * phy up/down/bcast ISR, which acks them itself.
			 */
			hisi_sas_phy_write32(hisi_hba, phy_no,
					     CHL_INT0, irq_value0
					     & (~CHL_INT0_SL_RX_BCST_ACK_MSK)
					     & (~CHL_INT0_SL_PHY_ENABLE_MSK)
					     & (~CHL_INT0_NOT_RDY_MSK));
		}
		irq_msk &= ~(0xe << (phy_no * 4));
		phy_no++;
	}

	/* Restore the original ENT_INT_SRC_MSK3 mask */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);

	return IRQ_HANDLED;
}

/* Sub-errors of HGC_AXI_FIFO_ERR_INFO: AXI master errors (bits 0-7) */
static const struct hisi_sas_hw_error axi_error[] = {
	{ .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
	{ .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
	{ .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
	{ .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
	{ .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
	{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
	{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
	{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
	{},
};

/* Sub-errors of HGC_AXI_FIFO_ERR_INFO: FIFO errors (bits 8-12) */
static const struct hisi_sas_hw_error fifo_error[] = {
	{ .msk = BIT(8),  .msg = "CQE_WINFO_FIFO" },
	{ .msk = BIT(9),  .msg = "CQE_MSG_FIFIO" },
	{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
	{ .msk = BIT(11), .msg = "CMDP_FIFO" },
	{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
	{},
};

/* Fatal sources in ENT_INT_SRC3; entries with .sub expand via .reg */
static const struct hisi_sas_hw_error fatal_axi_error[] = {
	{
		.irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
		.msg = "write pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
		.msg = "iptt no match slot",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
		.msg = "read pointer and depth",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = axi_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
		.reg = HGC_AXI_FIFO_ERR_INFO,
		.sub = fifo_error,
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
		.msg = "LM add/fetch list",
	},
	{
		.irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
		.msg = "SAS_HGC_ABT fetch LM list",
	},
};

/*
 * ISR for fatal AXI/FIFO errors and ITCT-clear completion (vector 11).
 * Fatal errors schedule the controller reset work; an ITC interrupt
 * completes the pending clear_itct waiter.
 */
static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
{
	u32 irq_value, irq_msk;
	struct hisi_hba *hisi_hba = p;
	struct device *dev = hisi_hba->dev;
	int i;

	/* Mask the fatal sources while they are being processed */
	irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);

	irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);

	for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
		const struct hisi_sas_hw_error *error = &fatal_axi_error[i];

		if (!(irq_value & error->irq_msk))
			continue;

		if (error->sub) {
			const struct hisi_sas_hw_error *sub = error->sub;
			u32 err_value = hisi_sas_read32(hisi_hba, error->reg);

			for (; sub->msk || sub->msg; sub++) {
				if (!(err_value & sub->msk))
					continue;

				dev_err(dev, "%s error (0x%x) found!\n",
					sub->msg, irq_value);
				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
			}
		} else {
			dev_err(dev, "%s error (0x%x) found!\n",
				error->msg, irq_value);
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		}
	}

	if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
		u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
		u32 dev_id = reg_val & ITCT_DEV_MSK;
		struct hisi_sas_device *sas_dev =
			&hisi_hba->devices[dev_id];

		hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
		dev_dbg(dev, "clear ITCT ok\n");
		complete(sas_dev->completion);
	}

	/* Ack the handled sources and restore the interrupt mask */
	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);

	return IRQ_HANDLED;
}

/*
 * Translate the per-slot hardware error record into libsas task status
 * for an erroneously-completed command.
 */
static void
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
	       struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct hisi_sas_complete_v3_hdr *complete_queue =
		hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
		&complete_queue[slot->cmplt_queue_slot];
	struct hisi_sas_err_record_v3 *record =
		hisi_sas_status_buf_addr_mem(slot);
	u32 dma_rx_err_type = record->dma_rx_err_type;
	u32 trans_tx_fail_type = record->trans_tx_fail_type;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			/* IO reached the target: mark for abort + retry */
			ts->stat = SAS_QUEUE_FULL;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
			ts->residual = trans_tx_fail_type;
			ts->stat = SAS_DATA_UNDERRUN;
		} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
			ts->stat = SAS_PHY_DOWN;
			slot->abort = 1;
		} else {
			ts->stat = SAS_OPEN_REJECT;
			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		}
		hisi_sas_sata_done(task, slot);
		break;
	case SAS_PROTOCOL_SMP:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	default:
		break;
	}
}

static int
slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
	struct sas_task *task = slot->task;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	struct task_status_struct *ts;
	struct domain_device *device;
	enum exec_status sts;
	struct hisi_sas_complete_v3_hdr *complete_queue =
		hisi_hba->complete_hdr[slot->cmplt_queue];
	struct hisi_sas_complete_v3_hdr *complete_hdr =
		&complete_queue[slot->cmplt_queue_slot];
	int aborted;
	unsigned long flags;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -EINVAL;

	ts = &task->task_status;
	device = task->dev;
	sas_dev = device->lldd_dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	memset(ts, 0, sizeof(*ts));
	ts->resp = SAS_TASK_COMPLETE;
	if (unlikely(aborted)) {
		/* Task already aborted by upper layer: just free the slot */
		dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_slot_task_free(hisi_hba, task, slot);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		return ts->stat;
	}

	if (unlikely(!sas_dev)) {
		dev_dbg(dev, "slot complete: port has not device\n");
		ts->stat = SAS_PHY_DOWN;
		goto out;
	}

	/*
	 * Use SAS+TMF status codes
	 */
	switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
		>> CMPLT_HDR_ABORT_STAT_OFF) {
	case STAT_IO_ABORTED:
		/* this IO has been aborted by abort command */
		ts->stat = SAS_ABORTED_TASK;
		goto out;
	case STAT_IO_COMPLETE:
		/* internal abort command complete */
		ts->stat = TMF_RESP_FUNC_SUCC;
		goto out;
	case STAT_IO_NO_DEVICE:
		ts->stat = TMF_RESP_FUNC_COMPLETE;
		goto out;
	case STAT_IO_NOT_VALID:
		/*
		 * abort single IO, the controller can't find the IO
		 */
		ts->stat = TMF_RESP_FUNC_FAILED;
		goto out;
	default:
		break;
	}

	/* check for erroneous completion */
	if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
		u32 *error_info = hisi_sas_status_buf_addr_mem(slot);

		slot_err_v3_hw(hisi_hba, task, slot);
		if (ts->stat != SAS_DATA_UNDERRUN)
			dev_info(dev, "erroneous completion iptt=%d task=%p "
				 "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
				 "Error info: 0x%x 0x%x 0x%x 0x%x\n",
				 slot->idx, task,
				 complete_hdr->dw0, complete_hdr->dw1,
				 complete_hdr->act, complete_hdr->dw3,
				 error_info[0], error_info[1],
				 error_info[2], error_info[3]);
		/* Slot marked for abort: keep it alive for the retry path */
		if (unlikely(slot->abort))
			return ts->stat;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP: {
		/* Response IU follows the error record in the status buf */
		struct ssp_response_iu *iu =
			hisi_sas_status_buf_addr_mem(slot) +
			sizeof(struct hisi_sas_err_record);

		sas_ssp_task_response(dev, task, iu);
		break;
	}
	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		void *to;

		ts->stat = SAM_STAT_GOOD;
		to = kmap_atomic(sg_page(sg_resp));

		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		memcpy(to + sg_resp->offset,
		       hisi_sas_status_buf_addr_mem(slot) +
		       sizeof(struct hisi_sas_err_record),
		       sg_dma_len(sg_resp));
		kunmap_atomic(to);
		break;
	}
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		ts->stat = SAM_STAT_GOOD;
		hisi_sas_sata_done(task, slot);
		break;
	default:
		ts->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (!slot->port->port_attached) {
		dev_warn(dev, "slot complete: port %d has removed\n",
			 slot->port->sas_port.id);
		ts->stat = SAS_PHY_DOWN;
	}

out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	sts = ts->stat;

	if (task->task_done)
		task->task_done(task);

	return sts;
}

/*
 * Tasklet draining one completion queue: walk entries between the
 * cached read pointer and the hardware write pointer, completing the
 * slot referenced by each entry's IPTT.
 */
static void cq_tasklet_v3_hw(unsigned long val)
{
	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	struct hisi_sas_slot *slot;
	struct hisi_sas_complete_v3_hdr *complete_queue;
	u32 rd_point = cq->rd_point, wr_point;
	int queue = cq->id;
	struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

	complete_queue = hisi_hba->complete_hdr[queue];

	spin_lock(&dq->lock);
	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
				   (0x14 * queue));

	while (rd_point != wr_point) {
		struct hisi_sas_complete_v3_hdr *complete_hdr;
		int iptt;

		complete_hdr = &complete_queue[rd_point];

		iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
		slot = &hisi_hba->slot_info[iptt];
		slot->cmplt_queue_slot = rd_point;
		slot->cmplt_queue = queue;
		slot_complete_v3_hw(hisi_hba, slot);

		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
			rd_point = 0;
	}

	/* update rd_point */
	cq->rd_point = rd_point;
	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
	spin_unlock(&dq->lock);
}

/* CQ hard-IRQ handler: ack the source and defer work to the tasklet. */
static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
{
	struct hisi_sas_cq *cq = p;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	int queue = cq->id;

	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);

	tasklet_schedule(&cq->tasklet);

	return IRQ_HANDLED;
}

/*
 * Allocate MSI vectors and request the phy (vector 1), channel
 * (vector 2), fatal (vector 11) and per-CQ (vectors 16+) interrupts.
 *
 * NOTE(review): the IRQs are requested with devm_request_irq() but the
 * error paths release them with free_irq(); mixing managed and manual
 * release risks a double free when the device is detached — confirm
 * against the devres documentation.
 */
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int vectors, rc;
	int i, k;
	int max_msi = HISI_SAS_MSI_COUNT_V3_HW;

	vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1,
					max_msi, PCI_IRQ_MSI);
	if (vectors < max_msi) {
		dev_err(dev, "could not allocate all msi (%d)\n", vectors);
		return -ENOENT;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
			      int_phy_up_down_bcast_v3_hw, 0,
			      DRV_NAME " phy", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_irq_vectors;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
			      int_chnl_int_v3_hw, 0,
			      DRV_NAME " channel", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_phy_irq;
	}

	rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
			      fatal_axi_int_v3_hw, 0,
			      DRV_NAME " fatal", hisi_hba);
	if (rc) {
		dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
		rc = -ENOENT;
		goto free_chnl_interrupt;
	}

	/* Init tasklets for cq only */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct tasklet_struct *t = &cq->tasklet;

		rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16),
				      cq_interrupt_v3_hw, 0,
				      DRV_NAME " cq", cq);
		if (rc) {
			dev_err(dev,
				"could not request cq%d interrupt, rc=%d\n",
				i, rc);
			rc = -ENOENT;
			goto free_cq_irqs;
		}

		tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
	}

	return 0;

free_cq_irqs:
	/* Undo only the CQ IRQs that were successfully requested */
	for (k = 0; k < i; k++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[k];

		free_irq(pci_irq_vector(pdev, k+16), cq);
	}
	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
free_chnl_interrupt:
	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
free_phy_irq:
	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}

/* hw->hw_init hook: bring up the controller, then its interrupts. */
static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	rc = interrupt_init_v3_hw(hisi_hba);
	if (rc)
		return rc;

	return 0;
}

/*
 * Update the programmed min/max link rate of a phy; the phy is
 * disabled, reprogrammed and restarted so the new rates take effect.
 */
static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
				   struct sas_phy_linkrates *r)
{
	u32 prog_phy_link_rate =
		hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;
	enum sas_linkrate min, max;
	u32 rate_mask = 0;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	max -= SAS_LINK_RATE_1_5_GBPS;

	/* One enable bit per supported rate, two bits apart */
	for (i = 0; i <= max; i++)
		rate_mask |= 1 << (i * 2);

	prog_phy_link_rate &= ~0xff;
	prog_phy_link_rate |= rate_mask;

	disable_phy_v3_hw(hisi_hba, phy_no);
	msleep(100);
	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
			     prog_phy_link_rate);
	start_phy_v3_hw(hisi_hba, phy_no);
}

/* Mask all interrupt sources and wait for in-flight handlers to finish. */
static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
{
	struct pci_dev *pdev = hisi_hba->pci_dev;
	int i;

	synchronize_irq(pci_irq_vector(pdev, 1));
	synchronize_irq(pci_irq_vector(pdev, 2));
	synchronize_irq(pci_irq_vector(pdev, 11));
	for (i = 0; i < hisi_hba->queue_count; i++) {
		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
		synchronize_irq(pci_irq_vector(pdev, i + 16));
	}

	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);

	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1);
		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
	}
}

/* Return the raw PHY_STATE register (one ready bit per phy). */
static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
{
	return hisi_sas_read32(hisi_hba, PHY_STATE);
}

/* Accumulate the hardware phy error counters into the sas_phy stats. */
static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	u32 reg_value;

	/* loss dword sync */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
	sphy->loss_of_dword_sync_count += reg_value;

	/* phy reset problem */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
	sphy->phy_reset_problem_count += reg_value;

	/* invalid dword */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
	sphy->invalid_dword_count += reg_value;

	/* disparity err */
	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
	sphy->running_disparity_error_count += reg_value;

}

/*
 * Soft-reset the controller: quiesce interrupts, queues and phys,
 * wait for the AXI bus to go idle, then re-run the hardware init.
 */
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int rc;
	u32 status;

	interrupt_disable_v3_hw(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
	hisi_sas_kill_tasklets(hisi_hba);

	hisi_sas_stop_phys(hisi_hba);

	mdelay(10);

	/* Halt the AXI master before resetting */
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);

	/* wait until bus idle */
	rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
		AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
	if (rc) {
		dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	return hw_init_v3_hw(hisi_hba);
}

/* v3 hardware operations table consumed by the hisi_sas core */
static const struct hisi_sas_hw hisi_sas_v3_hw = {
	.hw_init = hisi_sas_v3_init,
	.setup_itct = setup_itct_v3_hw,
	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
	.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
	.clear_itct = clear_itct_v3_hw,
	.sl_notify = sl_notify_v3_hw,
	.prep_ssp = prep_ssp_v3_hw,
	.prep_smp = prep_smp_v3_hw,
	.prep_stp = prep_ata_v3_hw,
	.prep_abort = prep_abort_v3_hw,
	.get_free_slot = get_free_slot_v3_hw,
	.start_delivery = start_delivery_v3_hw,
	.slot_complete = slot_complete_v3_hw,
	.phys_init = phys_init_v3_hw,
	.phy_start = start_phy_v3_hw,
	.phy_disable = disable_phy_v3_hw,
	.phy_hard_reset = phy_hard_reset_v3_hw,
	.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
	.phy_set_linkrate = phy_set_linkrate_v3_hw,
	.dereg_device = dereg_device_v3_hw,
	.soft_reset = soft_reset_v3_hw,
	.get_phys_state = get_phys_state_v3_hw,
	.get_events = phy_get_events_v3_hw,
};

/*
 * Allocate and minimally initialise the Scsi_Host and embedded
 * hisi_hba for a PCI controller.  Returns NULL on failure.
 */
static struct Scsi_Host *
hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "shost alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = &hisi_sas_v3_hw;
	hisi_hba->pci_dev = pdev;
	hisi_hba->dev = dev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

/*
 * PCI probe: enable the device, set up DMA masking (64-bit with 32-bit
 * fallback), map BAR 5, wire up the libsas host-adapter structures and
 * initialise the hardware.
 */
static int
hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable_device;

	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
		    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
			dev_err(dev, "No usable DMA addressing method\n");
			rc = -EIO;
			goto err_out_regions;
		}
	}

	shost = hisi_sas_shost_alloc_pci(pdev);
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	dev_set_drvdata(dev, sha);

	/* Registers live in BAR 5 */
	hisi_hba->regs = pcim_iomap(pdev, 5, 0);
	if (!hisi_hba->regs) {
		dev_err(dev, "cannot map register.\n");
		rc = -ENOMEM;
		goto err_out_ha;
	}

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	hisi_sas_init_add(hisi_hba);

	rc = scsi_add_host(shost, dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	scsi_host_put(shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out:
	return rc;
}

/* Release all interrupt lines and the MSI vectors on teardown. */
static void
hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
	int i;

	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
	/* continuation of hisi_sas_v3_destroy_irqs(): axi line + per-CQ lines */
	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		/* CQ interrupts start at vector 16, one per queue */
		free_irq(pci_irq_vector(pdev, i+16), cq);
	}
	pci_free_irq_vectors(pdev);
}

/*
 * PCI remove: tear down in reverse order of probe — libsas first, then the
 * SCSI host, IRQs, tasklets, PCI resources, and finally the driver memory
 * and the host reference.
 */
static void hisi_sas_v3_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sas_ha_struct *sha = dev_get_drvdata(dev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
	hisi_sas_kill_tasklets(hisi_hba);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
}

/*
 * Non-fatal RAS error bits reported in SAS_RAS_INTR0. Bit positions below
 * 19 are not decoded by this table.
 */
static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
	{ .irq_msk = BIT(19), .msg = "HILINK_INT" },
	{ .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
	{ .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
	{ .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
	{ .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
	{ .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
	{ .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
	{ .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
	{ .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
	{ .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
	{ .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
	{ .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
	{ .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
};

/*
 * Non-fatal RAS error bits reported in SAS_RAS_INTR1 (ECC 2-bit and poison
 * indications). Note there is deliberately no BIT(19) entry in this table.
 */
static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
	{ .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
	{ .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
	{ .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
	{ .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
	{ .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
	{ .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
	{ .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
	{ .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
	{ .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
	{ .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
	{ .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
	{ .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
	{ .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
	{ .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
	{ .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
	{ .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
	{ .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
	{ .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
	{ .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
	{ .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
	{ .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
	{ .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
	{ .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
	{ .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
	{ .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
	{ .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
	{ .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
	{ .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
	{ .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
	{ .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
	{ .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
};

/*
 * Decode and log any pending non-fatal RAS errors from SAS_RAS_INTR0/1,
 * acknowledging each register by writing back the value read (write-1-to-
 * clear semantics implied by the write-back — confirm against the hw spec).
 * Returns true if at least one error was seen, i.e. a reset is warranted.
 */
static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	const struct hisi_sas_hw_error *ras_error;
	bool need_reset = false;
	u32 irq_value;
	int i;

	irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
	for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
		ras_error = &sas_ras_intr0_nfe[i];
		if (ras_error->irq_msk & irq_value) {
			dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
				 ras_error->msg, irq_value);
			need_reset = true;
		}
	}
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);

	irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
	for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
		ras_error = &sas_ras_intr1_nfe[i];
		if (ras_error->irq_msk & irq_value) {
			dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
				 ras_error->msg, irq_value);
			need_reset = true;
		}
	}
	hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);

	return need_reset;
}

/*
 * PCI AER "error detected" callback: disconnect on permanent failure,
 * request a slot reset if any non-fatal RAS error is pending, otherwise
 * report the channel as recoverable.
 */
static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
		pci_channel_state_t state)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (process_non_fatal_error_v3_hw(hisi_hba))
		return PCI_ERS_RESULT_NEED_RESET;

	return PCI_ERS_RESULT_CAN_RECOVER;
}

/* AER "MMIO enabled" callback: nothing extra to do for this hw */
static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
{
	return PCI_ERS_RESULT_RECOVERED;
}

/*
 * AER "slot reset" callback: queue the controller-reset work on the driver
 * workqueue and wait for it synchronously; report recovery based on the
 * work's result flag.
 */
static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

	dev_info(dev, "PCI error: slot reset callback!!\n");
	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (r.done)
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

enum {
	/* instances of the controller */
	hip08,
};

/*
 * Legacy PM suspend entry (body follows below): quiesce the controller,
 * drain the AXI bus, save PCI state and drop to the target power state,
 * then release outstanding tasks and suspend the libsas HA.
 */
static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 device_state, status;
	int rc;
	u32 reg_val;
	unsigned long flags;

	if (!pdev->pm_cap) {
		dev_err(dev, "PCI PM not supported\n");
		return -ENODEV;
	}

	/* block new commands and let in-flight work drain first */
	set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	flush_workqueue(hisi_hba->wq);
	/* disable DQ/PHY/bus */
	interrupt_disable_v3_hw(hisi_hba);
	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
	hisi_sas_kill_tasklets(hisi_hba);

	hisi_sas_stop_phys(hisi_hba);

	/* halt the AXI master (bit 0 of AM_CTRL_GLOBAL) */
	reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
		AM_CTRL_GLOBAL);
	reg_val |= 0x1;
	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
		AM_CTRL_GLOBAL, reg_val);

	/* wait until bus idle */
	rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
		AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
	if (rc) {
		dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
		/* roll back the quiesce so the host stays usable */
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		return rc;
	}

	hisi_sas_init_mem(hisi_hba);

	device_state = pci_choose_state(pdev, state);
	/*
	 * NOTE(review): message says "entering operating state" for the
	 * low-power target state — wording looks inverted; confirm intent.
	 */
	dev_warn(dev, "entering operating state [D%d]\n",
		device_state);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, device_state);

	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_tasks(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	sas_suspend_ha(sha);
	return 0;
}

/*
 * Legacy PM resume: restore PCI power/state, re-enable the device, rebuild
 * the register configuration, restart the phys, and resume the libsas HA.
 */
static int hisi_sas_v3_resume(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = hisi_hba->shost;
	struct device *dev = hisi_hba->dev;
	/* NOTE(review): pci_enable_device() returns int; unsigned hides <0 */
	unsigned int rc;
	u32 device_state = pdev->current_state;

	dev_warn(dev, "resuming from operating state [D%d]\n",
		 device_state);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc)
		/*
		 * NOTE(review): failure is only logged and execution
		 * continues to touch the device — should this return rc?
		 */
		dev_err(dev, "enable device failed during resume (%d)\n", rc);

	pci_set_master(pdev);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	sas_prep_resume_ha(sha);
	init_reg_v3_hw(hisi_hba);
	hisi_hba->hw->phys_init(hisi_hba);
	sas_resume_ha(sha);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return 0;
}

/* PCI IDs bound by this driver: Huawei hip08 SAS controller */
static const struct pci_device_id sas_v3_pci_table[] = {
	{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
	{}
};
MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);

static const struct pci_error_handlers hisi_sas_err_handler = {
	.error_detected	= hisi_sas_error_detected_v3_hw,
	.mmio_enabled	= hisi_sas_mmio_enabled_v3_hw,
	.slot_reset	= hisi_sas_slot_reset_v3_hw,
};

static struct pci_driver sas_v3_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sas_v3_pci_table,
	.probe		= hisi_sas_v3_probe,
	.remove		= hisi_sas_v3_remove,
	.suspend	= hisi_sas_v3_suspend,
	.resume		= hisi_sas_v3_resume,
	.err_handler	= &hisi_sas_err_handler,
};

module_pci_driver(sas_v3_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
MODULE_ALIAS("pci:" DRV_NAME);