/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>
#include <scsi/scsi_tcq.h>

/* Mask of the low n bits. */
#define MASK(n)			((1ULL<<(n))-1)
/* Fold a 128M-map address into the DDR (MN) window register value. */
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
	((addr >> 25) & 0x3ff))
/* Fold a 128M-map address into the OCM window register value. */
#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
	((addr >> 25) & 0x3ff))
/* QDR (MS) window selector: bits 18-27 of the address. */
#define MS_WIN(addr) (addr & 0x0ffc0000)
/* Offsets of the MN/MS/OCM0 regions within the 2M BAR map. */
#define QLA82XX_PCI_MN_2M	(0)
#define QLA82XX_PCI_MS_2M	(0x80000)
#define QLA82XX_PCI_OCM0_2M	(0xc0000)
/* OCM addresses whose low bits land on 0x3f800 are not accessible. */
#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
/* Byte offset of an address within its 256K memory window. */
#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
#define BLOCK_PROTECT_BITS 0x0F

/* CRB window related */
#define CRB_BLK(off)	((off >> 20) & 0x3f)	/* 1M block index in CRB space */
#define CRB_SUBBLK(off)	((off >> 16) & 0xf)	/* 64K sub-block within a block */
#define CRB_WINDOW_2M	(0x130060)
#define QLA82XX_PCI_CAMQM_2M_END	(0x04800800UL)
/* High part of a CRB address: hub/agent id (<<20) plus 64K-aligned offset. */
#define CRB_HI(off)	((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
			((off) & 0xf0000))
#define QLA82XX_PCI_CAMQM_2M_BASE	(0x000ff800UL)
#define CRB_INDIRECT_2M	(0x1e0000UL)

#define MAX_CRB_XFORM 60
/* Maps PX CRB block index -> hub/agent base address (already shifted <<20). */
static unsigned long crb_addr_xform[MAX_CRB_XFORM];
/* Non-zero once crb_addr_xform has been populated. */
int qla82xx_crb_table_initialized;

/* Record the hub/agent base address of one named CRB block. */
#define qla82xx_crb_addr_transform(name) \
	(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
	QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)

/*
 * Populate crb_addr_xform for every CRB block so
 * qla82xx_decode_crb_addr() can translate internal CRB addresses
 * into PCI offsets.
 */
static void qla82xx_crb_addr_transform_setup(void)
{
	qla82xx_crb_addr_transform(XDMA);
	qla82xx_crb_addr_transform(TIMR);
	qla82xx_crb_addr_transform(SRE);
	qla82xx_crb_addr_transform(SQN3);
	qla82xx_crb_addr_transform(SQN2);
	qla82xx_crb_addr_transform(SQN1);
	qla82xx_crb_addr_transform(SQN0);
	qla82xx_crb_addr_transform(SQS3);
	qla82xx_crb_addr_transform(SQS2);
	qla82xx_crb_addr_transform(SQS1);
	qla82xx_crb_addr_transform(SQS0);
	qla82xx_crb_addr_transform(RPMX7);
	qla82xx_crb_addr_transform(RPMX6);
qla82xx_crb_addr_transform(RPMX5); 59 qla82xx_crb_addr_transform(RPMX4); 60 qla82xx_crb_addr_transform(RPMX3); 61 qla82xx_crb_addr_transform(RPMX2); 62 qla82xx_crb_addr_transform(RPMX1); 63 qla82xx_crb_addr_transform(RPMX0); 64 qla82xx_crb_addr_transform(ROMUSB); 65 qla82xx_crb_addr_transform(SN); 66 qla82xx_crb_addr_transform(QMN); 67 qla82xx_crb_addr_transform(QMS); 68 qla82xx_crb_addr_transform(PGNI); 69 qla82xx_crb_addr_transform(PGND); 70 qla82xx_crb_addr_transform(PGN3); 71 qla82xx_crb_addr_transform(PGN2); 72 qla82xx_crb_addr_transform(PGN1); 73 qla82xx_crb_addr_transform(PGN0); 74 qla82xx_crb_addr_transform(PGSI); 75 qla82xx_crb_addr_transform(PGSD); 76 qla82xx_crb_addr_transform(PGS3); 77 qla82xx_crb_addr_transform(PGS2); 78 qla82xx_crb_addr_transform(PGS1); 79 qla82xx_crb_addr_transform(PGS0); 80 qla82xx_crb_addr_transform(PS); 81 qla82xx_crb_addr_transform(PH); 82 qla82xx_crb_addr_transform(NIU); 83 qla82xx_crb_addr_transform(I2Q); 84 qla82xx_crb_addr_transform(EG); 85 qla82xx_crb_addr_transform(MN); 86 qla82xx_crb_addr_transform(MS); 87 qla82xx_crb_addr_transform(CAS2); 88 qla82xx_crb_addr_transform(CAS1); 89 qla82xx_crb_addr_transform(CAS0); 90 qla82xx_crb_addr_transform(CAM); 91 qla82xx_crb_addr_transform(C2C1); 92 qla82xx_crb_addr_transform(C2C0); 93 qla82xx_crb_addr_transform(SMB); 94 qla82xx_crb_addr_transform(OCM0); 95 /* 96 * Used only in P3 just define it for P2 also. 
97 */ 98 qla82xx_crb_addr_transform(I2C0); 99 100 qla82xx_crb_table_initialized = 1; 101 } 102 103 struct crb_128M_2M_block_map crb_128M_2M_map[64] = { 104 {{{0, 0, 0, 0} } }, 105 {{{1, 0x0100000, 0x0102000, 0x120000}, 106 {1, 0x0110000, 0x0120000, 0x130000}, 107 {1, 0x0120000, 0x0122000, 0x124000}, 108 {1, 0x0130000, 0x0132000, 0x126000}, 109 {1, 0x0140000, 0x0142000, 0x128000}, 110 {1, 0x0150000, 0x0152000, 0x12a000}, 111 {1, 0x0160000, 0x0170000, 0x110000}, 112 {1, 0x0170000, 0x0172000, 0x12e000}, 113 {0, 0x0000000, 0x0000000, 0x000000}, 114 {0, 0x0000000, 0x0000000, 0x000000}, 115 {0, 0x0000000, 0x0000000, 0x000000}, 116 {0, 0x0000000, 0x0000000, 0x000000}, 117 {0, 0x0000000, 0x0000000, 0x000000}, 118 {0, 0x0000000, 0x0000000, 0x000000}, 119 {1, 0x01e0000, 0x01e0800, 0x122000}, 120 {0, 0x0000000, 0x0000000, 0x000000} } } , 121 {{{1, 0x0200000, 0x0210000, 0x180000} } }, 122 {{{0, 0, 0, 0} } }, 123 {{{1, 0x0400000, 0x0401000, 0x169000} } }, 124 {{{1, 0x0500000, 0x0510000, 0x140000} } }, 125 {{{1, 0x0600000, 0x0610000, 0x1c0000} } }, 126 {{{1, 0x0700000, 0x0704000, 0x1b8000} } }, 127 {{{1, 0x0800000, 0x0802000, 0x170000}, 128 {0, 0x0000000, 0x0000000, 0x000000}, 129 {0, 0x0000000, 0x0000000, 0x000000}, 130 {0, 0x0000000, 0x0000000, 0x000000}, 131 {0, 0x0000000, 0x0000000, 0x000000}, 132 {0, 0x0000000, 0x0000000, 0x000000}, 133 {0, 0x0000000, 0x0000000, 0x000000}, 134 {0, 0x0000000, 0x0000000, 0x000000}, 135 {0, 0x0000000, 0x0000000, 0x000000}, 136 {0, 0x0000000, 0x0000000, 0x000000}, 137 {0, 0x0000000, 0x0000000, 0x000000}, 138 {0, 0x0000000, 0x0000000, 0x000000}, 139 {0, 0x0000000, 0x0000000, 0x000000}, 140 {0, 0x0000000, 0x0000000, 0x000000}, 141 {0, 0x0000000, 0x0000000, 0x000000}, 142 {1, 0x08f0000, 0x08f2000, 0x172000} } }, 143 {{{1, 0x0900000, 0x0902000, 0x174000}, 144 {0, 0x0000000, 0x0000000, 0x000000}, 145 {0, 0x0000000, 0x0000000, 0x000000}, 146 {0, 0x0000000, 0x0000000, 0x000000}, 147 {0, 0x0000000, 0x0000000, 0x000000}, 148 {0, 0x0000000, 0x0000000, 
0x000000}, 149 {0, 0x0000000, 0x0000000, 0x000000}, 150 {0, 0x0000000, 0x0000000, 0x000000}, 151 {0, 0x0000000, 0x0000000, 0x000000}, 152 {0, 0x0000000, 0x0000000, 0x000000}, 153 {0, 0x0000000, 0x0000000, 0x000000}, 154 {0, 0x0000000, 0x0000000, 0x000000}, 155 {0, 0x0000000, 0x0000000, 0x000000}, 156 {0, 0x0000000, 0x0000000, 0x000000}, 157 {0, 0x0000000, 0x0000000, 0x000000}, 158 {1, 0x09f0000, 0x09f2000, 0x176000} } }, 159 {{{0, 0x0a00000, 0x0a02000, 0x178000}, 160 {0, 0x0000000, 0x0000000, 0x000000}, 161 {0, 0x0000000, 0x0000000, 0x000000}, 162 {0, 0x0000000, 0x0000000, 0x000000}, 163 {0, 0x0000000, 0x0000000, 0x000000}, 164 {0, 0x0000000, 0x0000000, 0x000000}, 165 {0, 0x0000000, 0x0000000, 0x000000}, 166 {0, 0x0000000, 0x0000000, 0x000000}, 167 {0, 0x0000000, 0x0000000, 0x000000}, 168 {0, 0x0000000, 0x0000000, 0x000000}, 169 {0, 0x0000000, 0x0000000, 0x000000}, 170 {0, 0x0000000, 0x0000000, 0x000000}, 171 {0, 0x0000000, 0x0000000, 0x000000}, 172 {0, 0x0000000, 0x0000000, 0x000000}, 173 {0, 0x0000000, 0x0000000, 0x000000}, 174 {1, 0x0af0000, 0x0af2000, 0x17a000} } }, 175 {{{0, 0x0b00000, 0x0b02000, 0x17c000}, 176 {0, 0x0000000, 0x0000000, 0x000000}, 177 {0, 0x0000000, 0x0000000, 0x000000}, 178 {0, 0x0000000, 0x0000000, 0x000000}, 179 {0, 0x0000000, 0x0000000, 0x000000}, 180 {0, 0x0000000, 0x0000000, 0x000000}, 181 {0, 0x0000000, 0x0000000, 0x000000}, 182 {0, 0x0000000, 0x0000000, 0x000000}, 183 {0, 0x0000000, 0x0000000, 0x000000}, 184 {0, 0x0000000, 0x0000000, 0x000000}, 185 {0, 0x0000000, 0x0000000, 0x000000}, 186 {0, 0x0000000, 0x0000000, 0x000000}, 187 {0, 0x0000000, 0x0000000, 0x000000}, 188 {0, 0x0000000, 0x0000000, 0x000000}, 189 {0, 0x0000000, 0x0000000, 0x000000}, 190 {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, 191 {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } }, 192 {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } }, 193 {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } }, 194 {{{1, 0x0f00000, 0x0f01000, 0x164000} } }, 195 {{{0, 0x1000000, 0x1004000, 0x1a8000} } }, 196 {{{1, 
0x1100000, 0x1101000, 0x160000} } }, 197 {{{1, 0x1200000, 0x1201000, 0x161000} } }, 198 {{{1, 0x1300000, 0x1301000, 0x162000} } }, 199 {{{1, 0x1400000, 0x1401000, 0x163000} } }, 200 {{{1, 0x1500000, 0x1501000, 0x165000} } }, 201 {{{1, 0x1600000, 0x1601000, 0x166000} } }, 202 {{{0, 0, 0, 0} } }, 203 {{{0, 0, 0, 0} } }, 204 {{{0, 0, 0, 0} } }, 205 {{{0, 0, 0, 0} } }, 206 {{{0, 0, 0, 0} } }, 207 {{{0, 0, 0, 0} } }, 208 {{{1, 0x1d00000, 0x1d10000, 0x190000} } }, 209 {{{1, 0x1e00000, 0x1e01000, 0x16a000} } }, 210 {{{1, 0x1f00000, 0x1f10000, 0x150000} } }, 211 {{{0} } }, 212 {{{1, 0x2100000, 0x2102000, 0x120000}, 213 {1, 0x2110000, 0x2120000, 0x130000}, 214 {1, 0x2120000, 0x2122000, 0x124000}, 215 {1, 0x2130000, 0x2132000, 0x126000}, 216 {1, 0x2140000, 0x2142000, 0x128000}, 217 {1, 0x2150000, 0x2152000, 0x12a000}, 218 {1, 0x2160000, 0x2170000, 0x110000}, 219 {1, 0x2170000, 0x2172000, 0x12e000}, 220 {0, 0x0000000, 0x0000000, 0x000000}, 221 {0, 0x0000000, 0x0000000, 0x000000}, 222 {0, 0x0000000, 0x0000000, 0x000000}, 223 {0, 0x0000000, 0x0000000, 0x000000}, 224 {0, 0x0000000, 0x0000000, 0x000000}, 225 {0, 0x0000000, 0x0000000, 0x000000}, 226 {0, 0x0000000, 0x0000000, 0x000000}, 227 {0, 0x0000000, 0x0000000, 0x000000} } }, 228 {{{1, 0x2200000, 0x2204000, 0x1b0000} } }, 229 {{{0} } }, 230 {{{0} } }, 231 {{{0} } }, 232 {{{0} } }, 233 {{{0} } }, 234 {{{1, 0x2800000, 0x2804000, 0x1a4000} } }, 235 {{{1, 0x2900000, 0x2901000, 0x16b000} } }, 236 {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } }, 237 {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } }, 238 {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } }, 239 {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } }, 240 {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } }, 241 {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } }, 242 {{{1, 0x3000000, 0x3000400, 0x1adc00} } }, 243 {{{0, 0x3100000, 0x3104000, 0x1a8000} } }, 244 {{{1, 0x3200000, 0x3204000, 0x1d4000} } }, 245 {{{1, 0x3300000, 0x3304000, 0x1a0000} } }, 246 {{{0} } }, 247 {{{1, 0x3500000, 0x3500400, 0x1ac000} } }, 248 {{{1, 
0x3600000, 0x3600400, 0x1ae000} } }, 249 {{{1, 0x3700000, 0x3700400, 0x1ae400} } }, 250 {{{1, 0x3800000, 0x3804000, 0x1d0000} } }, 251 {{{1, 0x3900000, 0x3904000, 0x1b4000} } }, 252 {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } }, 253 {{{0} } }, 254 {{{0} } }, 255 {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } }, 256 {{{1, 0x3e00000, 0x3e01000, 0x167000} } }, 257 {{{1, 0x3f00000, 0x3f01000, 0x168000} } } 258 }; 259 260 /* 261 * top 12 bits of crb internal address (hub, agent) 262 */ 263 unsigned qla82xx_crb_hub_agt[64] = { 264 0, 265 QLA82XX_HW_CRB_HUB_AGT_ADR_PS, 266 QLA82XX_HW_CRB_HUB_AGT_ADR_MN, 267 QLA82XX_HW_CRB_HUB_AGT_ADR_MS, 268 0, 269 QLA82XX_HW_CRB_HUB_AGT_ADR_SRE, 270 QLA82XX_HW_CRB_HUB_AGT_ADR_NIU, 271 QLA82XX_HW_CRB_HUB_AGT_ADR_QMN, 272 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0, 273 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1, 274 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2, 275 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3, 276 QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, 277 QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, 278 QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, 279 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4, 280 QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, 281 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0, 282 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1, 283 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2, 284 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3, 285 QLA82XX_HW_CRB_HUB_AGT_ADR_PGND, 286 QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI, 287 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0, 288 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1, 289 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2, 290 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3, 291 0, 292 QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI, 293 QLA82XX_HW_CRB_HUB_AGT_ADR_SN, 294 0, 295 QLA82XX_HW_CRB_HUB_AGT_ADR_EG, 296 0, 297 QLA82XX_HW_CRB_HUB_AGT_ADR_PS, 298 QLA82XX_HW_CRB_HUB_AGT_ADR_CAM, 299 0, 300 0, 301 0, 302 0, 303 0, 304 QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, 305 0, 306 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1, 307 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2, 308 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3, 309 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4, 310 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5, 311 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6, 312 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7, 313 
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};

/* Device states (indexed by the IDC device-state register value). */
char *qdev_state[] = {
	"Unknown",
	"Cold",
	"Initializing",
	"Ready",
	"Need Reset",
	"Need Quiescent",
	"Failed",
	"Quiescent",
};

/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static void
qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
{
	u32 win_read;

	ha->crb_win = CRB_HI(*off);
	writel(ha->crb_win,
		(void *)(CRB_WINDOW_2M + ha->nx_pcibase));

	/* Read back value to make sure write has gone through before trying
	 * to use it.
	 */
	win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
	if (win_read != ha->crb_win) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
		    "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
	}
	/* Rebase 'off' to the indirect-access region of the 2M BAR. */
	*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}

/*
 * Map a 128M-map CRB offset onto the currently selected CRB window,
 * warning when the offset falls outside every known region.
 */
static inline unsigned long
qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
{
	/* See if we are currently pointing to the region we want to use next */
	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
		/* No need to change window. PCIX and PCIE regs are
		 * available in both windows.
		 */
		return off;
	}

	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
		/* We are in first CRB window */
		if (ha->curr_window != 0)
			WARN_ON(1);
		return off;
	}

	if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
		/* We are in second CRB window */
		off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;

		if (ha->curr_window != 1)
			return off;

		/* We are in the QM or direct access
		 * register region - do nothing
		 */
		if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
			(off < QLA82XX_PCI_CAMQM_MAX))
			return off;
	}
	/* strange address given */
	qla_printk(KERN_WARNING, ha,
	    "%s: Warning: unm_nic_pci_set_crbwindow called with"
	    " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
	return off;
}

/*
 * Translate a CRB offset into an address usable on the 2M BAR.
 * Returns 0 when *off has been rewritten for direct access, 1 when
 * the caller must go through the CRB window, -1 when the offset is
 * outside CRB space entirely.
 */
static int
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
{
	struct crb_128M_2M_sub_block_map *m;

	if (*off >= QLA82XX_CRB_MAX)
		return -1;

	if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
		*off = (*off - QLA82XX_PCI_CAMQM) +
		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
		return 0;
	}

	if (*off < QLA82XX_PCI_CRBSPACE)
		return -1;

	*off -= QLA82XX_PCI_CRBSPACE;

	/* Try direct map */
	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];

	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
		*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
		return 0;
	}
	/* Not in direct map, use crb window */
	return 1;
}

#define CRB_WIN_LOCK_TIMEOUT 100000000
/* Spin until the CRB window hardware semaphore (SEM7) is acquired;
 * -1 on timeout.
 */
static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
{
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore3 from PCI HW block */
		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
		if (done == 1)
			break;
		if (timeout >= CRB_WIN_LOCK_TIMEOUT)
			return -1;
		timeout++;
	}
	/* Record which function holds the window lock. */
	qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
	return 0;
}

/*
 * Write a dword to a CRB register. For offsets that are not directly
 * mapped on the 2M BAR, the CRB window is switched under hw_lock and
 * the hardware window semaphore for the duration of the access.
 * BUGs on offsets outside CRB space.
 */
int
qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
{
	unsigned long flags = 0;
	int rv;

	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		/* Indirect access: take the lock and point the window. */
		write_lock_irqsave(&ha->hw_lock, flags);
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, &off);
	}

	writel(data, (void __iomem *)off);

	if (rv == 1) {
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return 0;
}

/*
 * Read a dword from a CRB register; windowed counterpart of
 * qla82xx_wr_32(). BUGs on offsets outside CRB space.
 */
int
qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
{
	unsigned long flags = 0;
	int rv;
	u32 data;

	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, &off);
	}
	data = RD_REG_DWORD((void __iomem *)off);

	if (rv == 1) {
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return data;
}

#define IDC_LOCK_TIMEOUT 100000000
/*
 * Acquire the inter-driver-communication hardware semaphore (SEM5),
 * yielding the CPU between attempts when not in interrupt context.
 * Returns 0 on success, -1 on timeout.
 */
int qla82xx_idc_lock(struct qla_hw_data *ha)
{
	int i;
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore5 from PCI HW block */
		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
		if (done == 1)
			break;
		if (timeout >= IDC_LOCK_TIMEOUT)
			return -1;

		timeout++;

		/* Yield CPU */
		if (!in_interrupt())
			schedule();
		else {
			for (i = 0; i < 20; i++)
				cpu_relax();
		}
	}

	return 0;
}

/* Release the IDC hardware semaphore. */
void qla82xx_idc_unlock(struct qla_hw_data *ha)
{
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
}

/* PCI Windowing for DDR regions.
 */
#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
	(((addr) <= (high)) && ((addr) >= (low)))
/*
 * check memory access boundary.
 * used by test agent. support ddr access only for now
 */
static unsigned long
qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
	unsigned long long addr, int size)
{
	/* Both the first and last byte must lie inside DDR space and
	 * the access width must be 1, 2, 4 or 8 bytes.
	 */
	if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX) ||
		!QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX) ||
		((size != 1) && (size != 2) && (size != 4) && (size != 8)))
		return 0;
	else
		return 1;
}

/* Rate-limits the unknown-address warning in qla82xx_pci_set_window(). */
int qla82xx_pci_set_window_warning_count;

/*
 * Program the memory window (DDR/OCM/QDR) covering 'addr' and return
 * the corresponding 2M-BAR offset, or -1UL for unknown ranges.
 * Each window write is read back and a mismatch is logged.
 */
static unsigned long
qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
{
	int window;
	u32 win_read;

	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX)) {
		/* DDR network side */
		window = MN_WIN(addr);
		ha->ddr_mn_window = window;
		qla82xx_wr_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
		if ((win_read << 17) != window) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
			    __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
		QLA82XX_ADDR_OCM0_MAX)) {
		unsigned int temp1;
		if ((addr & 0x00ff800) == 0xff800) {
			qla_printk(KERN_WARNING, ha,
			    "%s: QM access not handled.\n", __func__);
			addr = -1UL;
			/* NOTE(review): execution falls through and still
			 * programs a window from addr == -1UL; confirm
			 * whether an early return was intended here.
			 */
		}
		window = OCM_WIN(addr);
		ha->ddr_mn_window = window;
		qla82xx_wr_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
		temp1 = ((window & 0x1FF) << 7) |
		    ((window & 0x0FFFE0000) >> 17);
		if (win_read != temp1) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
			    __func__, temp1, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;

	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
		QLA82XX_P3_ADDR_QDR_NET_MAX)) {
		/* QDR network side */
		window = MS_WIN(addr);
		ha->qdr_sn_window = window;
		qla82xx_wr_32(ha,
			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
		if (win_read != window) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
			    __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
	} else {
		/*
		 * peg gdb frequently accesses memory that doesn't exist,
		 * this limits the chit chat so debugging isn't slowed down.
		 */
		if ((qla82xx_pci_set_window_warning_count++ < 8) ||
			(qla82xx_pci_set_window_warning_count%64 == 0)) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Warning:%s Unknown address range!\n", __func__,
			    QLA2XXX_DRIVER_NAME);
		}
		addr = -1UL;
	}
	return addr;
}

/* check if address is in the same windows as the previous access */
static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
	unsigned long long addr)
{
	int window;
	unsigned long long qdr_max;

	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;

	/* DDR network side */
	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX))
		BUG();
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
		QLA82XX_ADDR_OCM0_MAX))
		return 1;
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
		QLA82XX_ADDR_OCM1_MAX))
		return 1;
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
		/* QDR network side: 4M-granular window index. */
		window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
		if (ha->qdr_sn_window == window)
			return 1;
	}
	return 0;
}

static int
qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, 665 u64 off, void *data, int size) 666 { 667 unsigned long flags; 668 void *addr = NULL; 669 int ret = 0; 670 u64 start; 671 uint8_t *mem_ptr = NULL; 672 unsigned long mem_base; 673 unsigned long mem_page; 674 675 write_lock_irqsave(&ha->hw_lock, flags); 676 677 /* 678 * If attempting to access unknown address or straddle hw windows, 679 * do not access. 680 */ 681 start = qla82xx_pci_set_window(ha, off); 682 if ((start == -1UL) || 683 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 684 write_unlock_irqrestore(&ha->hw_lock, flags); 685 qla_printk(KERN_ERR, ha, 686 "%s out of bound pci memory access. " 687 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 688 return -1; 689 } 690 691 write_unlock_irqrestore(&ha->hw_lock, flags); 692 mem_base = pci_resource_start(ha->pdev, 0); 693 mem_page = start & PAGE_MASK; 694 /* Map two pages whenever user tries to access addresses in two 695 * consecutive pages. 696 */ 697 if (mem_page != ((start + size - 1) & PAGE_MASK)) 698 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); 699 else 700 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); 701 if (mem_ptr == 0UL) { 702 *(u8 *)data = 0; 703 return -1; 704 } 705 addr = mem_ptr; 706 addr += start & (PAGE_SIZE - 1); 707 write_lock_irqsave(&ha->hw_lock, flags); 708 709 switch (size) { 710 case 1: 711 *(u8 *)data = readb(addr); 712 break; 713 case 2: 714 *(u16 *)data = readw(addr); 715 break; 716 case 4: 717 *(u32 *)data = readl(addr); 718 break; 719 case 8: 720 *(u64 *)data = readq(addr); 721 break; 722 default: 723 ret = -1; 724 break; 725 } 726 write_unlock_irqrestore(&ha->hw_lock, flags); 727 728 if (mem_ptr) 729 iounmap(mem_ptr); 730 return ret; 731 } 732 733 static int 734 qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, 735 u64 off, void *data, int size) 736 { 737 unsigned long flags; 738 void *addr = NULL; 739 int ret = 0; 740 u64 start; 741 uint8_t *mem_ptr = NULL; 742 unsigned long mem_base; 743 unsigned 
long mem_page; 744 745 write_lock_irqsave(&ha->hw_lock, flags); 746 747 /* 748 * If attempting to access unknown address or straddle hw windows, 749 * do not access. 750 */ 751 start = qla82xx_pci_set_window(ha, off); 752 if ((start == -1UL) || 753 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 754 write_unlock_irqrestore(&ha->hw_lock, flags); 755 qla_printk(KERN_ERR, ha, 756 "%s out of bound pci memory access. " 757 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 758 return -1; 759 } 760 761 write_unlock_irqrestore(&ha->hw_lock, flags); 762 mem_base = pci_resource_start(ha->pdev, 0); 763 mem_page = start & PAGE_MASK; 764 /* Map two pages whenever user tries to access addresses in two 765 * consecutive pages. 766 */ 767 if (mem_page != ((start + size - 1) & PAGE_MASK)) 768 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); 769 else 770 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); 771 if (mem_ptr == 0UL) 772 return -1; 773 774 addr = mem_ptr; 775 addr += start & (PAGE_SIZE - 1); 776 write_lock_irqsave(&ha->hw_lock, flags); 777 778 switch (size) { 779 case 1: 780 writeb(*(u8 *)data, addr); 781 break; 782 case 2: 783 writew(*(u16 *)data, addr); 784 break; 785 case 4: 786 writel(*(u32 *)data, addr); 787 break; 788 case 8: 789 writeq(*(u64 *)data, addr); 790 break; 791 default: 792 ret = -1; 793 break; 794 } 795 write_unlock_irqrestore(&ha->hw_lock, flags); 796 if (mem_ptr) 797 iounmap(mem_ptr); 798 return ret; 799 } 800 801 #define MTU_FUDGE_FACTOR 100 802 static unsigned long 803 qla82xx_decode_crb_addr(unsigned long addr) 804 { 805 int i; 806 unsigned long base_addr, offset, pci_base; 807 808 if (!qla82xx_crb_table_initialized) 809 qla82xx_crb_addr_transform_setup(); 810 811 pci_base = ADDR_ERROR; 812 base_addr = addr & 0xfff00000; 813 offset = addr & 0x000fffff; 814 815 for (i = 0; i < MAX_CRB_XFORM; i++) { 816 if (crb_addr_xform[i] == base_addr) { 817 pci_base = i << 20; 818 break; 819 } 820 } 821 if (pci_base == ADDR_ERROR) 822 return pci_base; 
823 return pci_base + offset; 824 } 825 826 static long rom_max_timeout = 100; 827 static long qla82xx_rom_lock_timeout = 100; 828 829 static int 830 qla82xx_rom_lock(struct qla_hw_data *ha) 831 { 832 int done = 0, timeout = 0; 833 834 while (!done) { 835 /* acquire semaphore2 from PCI HW block */ 836 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); 837 if (done == 1) 838 break; 839 if (timeout >= qla82xx_rom_lock_timeout) 840 return -1; 841 timeout++; 842 } 843 qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); 844 return 0; 845 } 846 847 static int 848 qla82xx_wait_rom_busy(struct qla_hw_data *ha) 849 { 850 long timeout = 0; 851 long done = 0 ; 852 853 while (done == 0) { 854 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 855 done &= 4; 856 timeout++; 857 if (timeout >= rom_max_timeout) { 858 DEBUG(qla_printk(KERN_INFO, ha, 859 "%s: Timeout reached waiting for rom busy", 860 QLA2XXX_DRIVER_NAME)); 861 return -1; 862 } 863 } 864 return 0; 865 } 866 867 static int 868 qla82xx_wait_rom_done(struct qla_hw_data *ha) 869 { 870 long timeout = 0; 871 long done = 0 ; 872 873 while (done == 0) { 874 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 875 done &= 2; 876 timeout++; 877 if (timeout >= rom_max_timeout) { 878 DEBUG(qla_printk(KERN_INFO, ha, 879 "%s: Timeout reached waiting for rom done", 880 QLA2XXX_DRIVER_NAME)); 881 return -1; 882 } 883 } 884 return 0; 885 } 886 887 static int 888 qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 889 { 890 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); 891 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 892 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); 893 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); 894 qla82xx_wait_rom_busy(ha); 895 if (qla82xx_wait_rom_done(ha)) { 896 qla_printk(KERN_WARNING, ha, 897 "%s: Error waiting for rom done\n", 898 QLA2XXX_DRIVER_NAME); 899 return -1; 900 } 901 /* Reset abyte_cnt and dummy_byte_cnt */ 902 qla82xx_wr_32(ha, 
QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 903 udelay(10); 904 cond_resched(); 905 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); 906 *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); 907 return 0; 908 } 909 910 static int 911 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 912 { 913 int ret, loops = 0; 914 915 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 916 udelay(100); 917 schedule(); 918 loops++; 919 } 920 if (loops >= 50000) { 921 qla_printk(KERN_INFO, ha, 922 "%s: qla82xx_rom_lock failed\n", 923 QLA2XXX_DRIVER_NAME); 924 return -1; 925 } 926 ret = qla82xx_do_rom_fast_read(ha, addr, valp); 927 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 928 return ret; 929 } 930 931 static int 932 qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) 933 { 934 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); 935 qla82xx_wait_rom_busy(ha); 936 if (qla82xx_wait_rom_done(ha)) { 937 qla_printk(KERN_WARNING, ha, 938 "Error waiting for rom done\n"); 939 return -1; 940 } 941 *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); 942 return 0; 943 } 944 945 static int 946 qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) 947 { 948 long timeout = 0; 949 uint32_t done = 1 ; 950 uint32_t val; 951 int ret = 0; 952 953 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); 954 while ((done != 0) && (ret == 0)) { 955 ret = qla82xx_read_status_reg(ha, &val); 956 done = val & 1; 957 timeout++; 958 udelay(10); 959 cond_resched(); 960 if (timeout >= 50000) { 961 qla_printk(KERN_WARNING, ha, 962 "Timeout reached waiting for write finish"); 963 return -1; 964 } 965 } 966 return ret; 967 } 968 969 static int 970 qla82xx_flash_set_write_enable(struct qla_hw_data *ha) 971 { 972 uint32_t val; 973 qla82xx_wait_rom_busy(ha); 974 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); 975 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN); 976 qla82xx_wait_rom_busy(ha); 977 if (qla82xx_wait_rom_done(ha)) 978 return 
-1; 979 if (qla82xx_read_status_reg(ha, &val) != 0) 980 return -1; 981 if ((val & 2) != 2) 982 return -1; 983 return 0; 984 } 985 986 static int 987 qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) 988 { 989 if (qla82xx_flash_set_write_enable(ha)) 990 return -1; 991 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); 992 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); 993 if (qla82xx_wait_rom_done(ha)) { 994 qla_printk(KERN_WARNING, ha, 995 "Error waiting for rom done\n"); 996 return -1; 997 } 998 return qla82xx_flash_wait_write_finish(ha); 999 } 1000 1001 static int 1002 qla82xx_write_disable_flash(struct qla_hw_data *ha) 1003 { 1004 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); 1005 if (qla82xx_wait_rom_done(ha)) { 1006 qla_printk(KERN_WARNING, ha, 1007 "Error waiting for rom done\n"); 1008 return -1; 1009 } 1010 return 0; 1011 } 1012 1013 static int 1014 ql82xx_rom_lock_d(struct qla_hw_data *ha) 1015 { 1016 int loops = 0; 1017 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 1018 udelay(100); 1019 cond_resched(); 1020 loops++; 1021 } 1022 if (loops >= 50000) { 1023 qla_printk(KERN_WARNING, ha, "ROM lock failed\n"); 1024 return -1; 1025 } 1026 return 0;; 1027 } 1028 1029 static int 1030 qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, 1031 uint32_t data) 1032 { 1033 int ret = 0; 1034 1035 ret = ql82xx_rom_lock_d(ha); 1036 if (ret < 0) { 1037 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 1038 return ret; 1039 } 1040 1041 if (qla82xx_flash_set_write_enable(ha)) 1042 goto done_write; 1043 1044 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data); 1045 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr); 1046 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); 1047 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); 1048 qla82xx_wait_rom_busy(ha); 1049 if (qla82xx_wait_rom_done(ha)) { 1050 qla_printk(KERN_WARNING, ha, 1051 "Error waiting for rom done\n"); 1052 ret = -1; 
1053 goto done_write; 1054 } 1055 1056 ret = qla82xx_flash_wait_write_finish(ha); 1057 1058 done_write: 1059 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 1060 return ret; 1061 } 1062 1063 /* This routine does CRB initialize sequence 1064 * to put the ISP into operational state 1065 */ 1066 static int 1067 qla82xx_pinit_from_rom(scsi_qla_host_t *vha) 1068 { 1069 int addr, val; 1070 int i ; 1071 struct crb_addr_pair *buf; 1072 unsigned long off; 1073 unsigned offset, n; 1074 struct qla_hw_data *ha = vha->hw; 1075 1076 struct crb_addr_pair { 1077 long addr; 1078 long data; 1079 }; 1080 1081 /* Halt all the indiviual PEGs and other blocks of the ISP */ 1082 qla82xx_rom_lock(ha); 1083 1084 /* disable all I2Q */ 1085 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); 1086 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0); 1087 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); 1088 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); 1089 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); 1090 qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); 1091 1092 /* disable all niu interrupts */ 1093 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); 1094 /* disable xge rx/tx */ 1095 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); 1096 /* disable xg1 rx/tx */ 1097 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); 1098 /* disable sideband mac */ 1099 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); 1100 /* disable ap0 mac */ 1101 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); 1102 /* disable ap1 mac */ 1103 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); 1104 1105 /* halt sre */ 1106 val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); 1107 qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); 1108 1109 /* halt epg */ 1110 qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); 1111 1112 /* halt timers */ 1113 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); 1114 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); 1115 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); 1116 
qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); 1117 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); 1118 qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); 1119 1120 /* halt pegs */ 1121 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); 1122 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); 1123 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); 1124 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); 1125 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); 1126 msleep(20); 1127 1128 /* big hammer */ 1129 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 1130 /* don't reset CAM block on reset */ 1131 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); 1132 else 1133 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); 1134 1135 /* reset ms */ 1136 val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4); 1137 val |= (1 << 1); 1138 qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); 1139 msleep(20); 1140 1141 /* unreset ms */ 1142 val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4); 1143 val &= ~(1 << 1); 1144 qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); 1145 msleep(20); 1146 1147 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 1148 1149 /* Read the signature value from the flash. 
1150 * Offset 0: Contain signature (0xcafecafe) 1151 * Offset 4: Offset and number of addr/value pairs 1152 * that present in CRB initialize sequence 1153 */ 1154 if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || 1155 qla82xx_rom_fast_read(ha, 4, &n) != 0) { 1156 qla_printk(KERN_WARNING, ha, 1157 "[ERROR] Reading crb_init area: n: %08x\n", n); 1158 return -1; 1159 } 1160 1161 /* Offset in flash = lower 16 bits 1162 * Number of enteries = upper 16 bits 1163 */ 1164 offset = n & 0xffffU; 1165 n = (n >> 16) & 0xffffU; 1166 1167 /* number of addr/value pair should not exceed 1024 enteries */ 1168 if (n >= 1024) { 1169 qla_printk(KERN_WARNING, ha, 1170 "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n", 1171 QLA2XXX_DRIVER_NAME, __func__, n); 1172 return -1; 1173 } 1174 1175 qla_printk(KERN_INFO, ha, 1176 "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n); 1177 1178 buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL); 1179 if (buf == NULL) { 1180 qla_printk(KERN_WARNING, ha, 1181 "%s: [ERROR] Unable to malloc memory.\n", 1182 QLA2XXX_DRIVER_NAME); 1183 return -1; 1184 } 1185 1186 for (i = 0; i < n; i++) { 1187 if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 || 1188 qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) { 1189 kfree(buf); 1190 return -1; 1191 } 1192 1193 buf[i].addr = addr; 1194 buf[i].data = val; 1195 } 1196 1197 for (i = 0; i < n; i++) { 1198 /* Translate internal CRB initialization 1199 * address to PCI bus address 1200 */ 1201 off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) + 1202 QLA82XX_PCI_CRBSPACE; 1203 /* Not all CRB addr/value pair to be written, 1204 * some of them are skipped 1205 */ 1206 1207 /* skipping cold reboot MAGIC */ 1208 if (off == QLA82XX_CAM_RAM(0x1fc)) 1209 continue; 1210 1211 /* do not reset PCI */ 1212 if (off == (ROMUSB_GLB + 0xbc)) 1213 continue; 1214 1215 /* skip core clock, so that firmware can increase the clock */ 1216 if (off == (ROMUSB_GLB + 0xc8)) 1217 
continue; 1218 1219 /* skip the function enable register */ 1220 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION)) 1221 continue; 1222 1223 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2)) 1224 continue; 1225 1226 if ((off & 0x0ff00000) == QLA82XX_CRB_SMB) 1227 continue; 1228 1229 if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET) 1230 continue; 1231 1232 if (off == ADDR_ERROR) { 1233 qla_printk(KERN_WARNING, ha, 1234 "%s: [ERROR] Unknown addr: 0x%08lx\n", 1235 QLA2XXX_DRIVER_NAME, buf[i].addr); 1236 continue; 1237 } 1238 1239 qla82xx_wr_32(ha, off, buf[i].data); 1240 1241 /* ISP requires much bigger delay to settle down, 1242 * else crb_window returns 0xffffffff 1243 */ 1244 if (off == QLA82XX_ROMUSB_GLB_SW_RESET) 1245 msleep(1000); 1246 1247 /* ISP requires millisec delay between 1248 * successive CRB register updation 1249 */ 1250 msleep(1); 1251 } 1252 1253 kfree(buf); 1254 1255 /* Resetting the data and instruction cache */ 1256 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e); 1257 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8); 1258 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8); 1259 1260 /* Clear all protocol processing engines */ 1261 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0); 1262 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0); 1263 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0); 1264 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0); 1265 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0); 1266 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0); 1267 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0); 1268 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0); 1269 return 0; 1270 } 1271 1272 static int 1273 qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, 1274 u64 off, void *data, int size) 1275 { 1276 int i, j, ret = 0, loop, sz[2], off0; 1277 int scale, shift_amount, startword; 1278 uint32_t temp; 1279 uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; 1280 1281 /* 1282 * If not MN, go check for MS or invalid. 
1283 */ 1284 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) 1285 mem_crb = QLA82XX_CRB_QDR_NET; 1286 else { 1287 mem_crb = QLA82XX_CRB_DDR_NET; 1288 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) 1289 return qla82xx_pci_mem_write_direct(ha, 1290 off, data, size); 1291 } 1292 1293 off0 = off & 0x7; 1294 sz[0] = (size < (8 - off0)) ? size : (8 - off0); 1295 sz[1] = size - sz[0]; 1296 1297 off8 = off & 0xfffffff0; 1298 loop = (((off & 0xf) + size - 1) >> 4) + 1; 1299 shift_amount = 4; 1300 scale = 2; 1301 startword = (off & 0xf)/8; 1302 1303 for (i = 0; i < loop; i++) { 1304 if (qla82xx_pci_mem_read_2M(ha, off8 + 1305 (i << shift_amount), &word[i * scale], 8)) 1306 return -1; 1307 } 1308 1309 switch (size) { 1310 case 1: 1311 tmpw = *((uint8_t *)data); 1312 break; 1313 case 2: 1314 tmpw = *((uint16_t *)data); 1315 break; 1316 case 4: 1317 tmpw = *((uint32_t *)data); 1318 break; 1319 case 8: 1320 default: 1321 tmpw = *((uint64_t *)data); 1322 break; 1323 } 1324 1325 if (sz[0] == 8) { 1326 word[startword] = tmpw; 1327 } else { 1328 word[startword] &= 1329 ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); 1330 word[startword] |= tmpw << (off0 * 8); 1331 } 1332 if (sz[1] != 0) { 1333 word[startword+1] &= ~(~0ULL << (sz[1] * 8)); 1334 word[startword+1] |= tmpw >> (sz[0] * 8); 1335 } 1336 1337 for (i = 0; i < loop; i++) { 1338 temp = off8 + (i << shift_amount); 1339 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); 1340 temp = 0; 1341 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); 1342 temp = word[i * scale] & 0xffffffff; 1343 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); 1344 temp = (word[i * scale] >> 32) & 0xffffffff; 1345 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); 1346 temp = word[i*scale + 1] & 0xffffffff; 1347 qla82xx_wr_32(ha, mem_crb + 1348 MIU_TEST_AGT_WRDATA_UPPER_LO, temp); 1349 temp = (word[i*scale + 1] >> 32) & 0xffffffff; 1350 qla82xx_wr_32(ha, mem_crb + 1351 MIU_TEST_AGT_WRDATA_UPPER_HI, temp); 
1352 1353 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; 1354 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); 1355 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; 1356 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); 1357 1358 for (j = 0; j < MAX_CTL_CHECK; j++) { 1359 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); 1360 if ((temp & MIU_TA_CTL_BUSY) == 0) 1361 break; 1362 } 1363 1364 if (j >= MAX_CTL_CHECK) { 1365 if (printk_ratelimit()) 1366 dev_err(&ha->pdev->dev, 1367 "failed to write through agent\n"); 1368 ret = -1; 1369 break; 1370 } 1371 } 1372 1373 return ret; 1374 } 1375 1376 static int 1377 qla82xx_fw_load_from_flash(struct qla_hw_data *ha) 1378 { 1379 int i; 1380 long size = 0; 1381 long flashaddr = ha->flt_region_bootload << 2; 1382 long memaddr = BOOTLD_START; 1383 u64 data; 1384 u32 high, low; 1385 size = (IMAGE_START - BOOTLD_START) / 8; 1386 1387 for (i = 0; i < size; i++) { 1388 if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) || 1389 (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) { 1390 return -1; 1391 } 1392 data = ((u64)high << 32) | low ; 1393 qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8); 1394 flashaddr += 8; 1395 memaddr += 8; 1396 1397 if (i % 0x1000 == 0) 1398 msleep(1); 1399 } 1400 udelay(100); 1401 read_lock(&ha->hw_lock); 1402 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); 1403 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); 1404 read_unlock(&ha->hw_lock); 1405 return 0; 1406 } 1407 1408 int 1409 qla82xx_pci_mem_read_2M(struct qla_hw_data *ha, 1410 u64 off, void *data, int size) 1411 { 1412 int i, j = 0, k, start, end, loop, sz[2], off0[2]; 1413 int shift_amount; 1414 uint32_t temp; 1415 uint64_t off8, val, mem_crb, word[2] = {0, 0}; 1416 1417 /* 1418 * If not MN, go check for MS or invalid. 
1419 */ 1420 1421 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) 1422 mem_crb = QLA82XX_CRB_QDR_NET; 1423 else { 1424 mem_crb = QLA82XX_CRB_DDR_NET; 1425 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) 1426 return qla82xx_pci_mem_read_direct(ha, 1427 off, data, size); 1428 } 1429 1430 off8 = off & 0xfffffff0; 1431 off0[0] = off & 0xf; 1432 sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]); 1433 shift_amount = 4; 1434 loop = ((off0[0] + size - 1) >> shift_amount) + 1; 1435 off0[1] = 0; 1436 sz[1] = size - sz[0]; 1437 1438 for (i = 0; i < loop; i++) { 1439 temp = off8 + (i << shift_amount); 1440 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); 1441 temp = 0; 1442 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp); 1443 temp = MIU_TA_CTL_ENABLE; 1444 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); 1445 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; 1446 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); 1447 1448 for (j = 0; j < MAX_CTL_CHECK; j++) { 1449 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); 1450 if ((temp & MIU_TA_CTL_BUSY) == 0) 1451 break; 1452 } 1453 1454 if (j >= MAX_CTL_CHECK) { 1455 if (printk_ratelimit()) 1456 dev_err(&ha->pdev->dev, 1457 "failed to read through agent\n"); 1458 break; 1459 } 1460 1461 start = off0[i] >> 2; 1462 end = (off0[i] + sz[i] - 1) >> 2; 1463 for (k = start; k <= end; k++) { 1464 temp = qla82xx_rd_32(ha, 1465 mem_crb + MIU_TEST_AGT_RDDATA(k)); 1466 word[i] |= ((uint64_t)temp << (32 * (k & 1))); 1467 } 1468 } 1469 1470 if (j >= MAX_CTL_CHECK) 1471 return -1; 1472 1473 if ((off0[0] & 7) == 0) { 1474 val = word[0]; 1475 } else { 1476 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | 1477 ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); 1478 } 1479 1480 switch (size) { 1481 case 1: 1482 *(uint8_t *)data = val; 1483 break; 1484 case 2: 1485 *(uint16_t *)data = val; 1486 break; 1487 case 4: 1488 *(uint32_t *)data = val; 1489 break; 1490 case 8: 1491 
	*(uint64_t *)data = val;
		break;
	}
	return 0;
}


/* Walk the unified ROM image (URI) directory at the start of @unirom and
 * return a pointer (into the image itself) to the table descriptor whose
 * type matches @section, or NULL if no entry matches.
 *
 * NOTE(review): cpu_to_le32() is used where le32_to_cpu() looks intended
 * (on-image fields are little-endian being read by the CPU).  On a
 * little-endian host both are identity, so behavior is unchanged there;
 * verify on big-endian before relying on this.
 */
static struct qla82xx_uri_table_desc *
qla82xx_get_table_desc(const u8 *unirom, int section)
{
	uint32_t i;
	struct qla82xx_uri_table_desc *directory =
	    (struct qla82xx_uri_table_desc *)&unirom[0];
	__le32 offset;
	__le32 tab_type;
	__le32 entries = cpu_to_le32(directory->num_entries);

	for (i = 0; i < entries; i++) {
		offset = cpu_to_le32(directory->findex) +
		    (i * cpu_to_le32(directory->entry_size));
		/* Reads the u32 at byte offset (offset + 8*4) of the image;
		 * presumably the entry's table-type field — TODO confirm
		 * against the URI table layout.
		 */
		tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));

		if (tab_type == section)
			return (struct qla82xx_uri_table_desc *)&unirom[offset];
	}

	return NULL;
}

/* Look up the data descriptor for (@section, @idx_offset) in the firmware
 * blob attached to @ha.  The per-product index is read from the image at
 * ha->file_prd_off (set by qla82xx_set_product_offset()).  Returns a
 * pointer into the firmware image, or NULL if the section table is absent.
 */
static struct qla82xx_uri_data_desc *
qla82xx_get_data_desc(struct qla_hw_data *ha,
	u32 section, u32 idx_offset)
{
	const u8 *unirom = ha->hablob->fw->data;
	int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
	struct qla82xx_uri_table_desc *tab_desc = NULL;
	__le32 offset;

	tab_desc = qla82xx_get_table_desc(unirom, section);
	if (!tab_desc)
		return NULL;

	offset = cpu_to_le32(tab_desc->findex) +
	    (cpu_to_le32(tab_desc->entry_size) * idx);

	return (struct qla82xx_uri_data_desc *)&unirom[offset];
}

/* Return a pointer to the bootloader image inside the firmware blob.
 * For a unified ROM image the offset comes from the BOOTLD descriptor;
 * otherwise the fixed BOOTLD_START offset is used.
 */
static u8 *
qla82xx_get_bootld_offset(struct qla_hw_data *ha)
{
	u32 offset = BOOTLD_START;
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha,
		    QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
		if (uri_desc)
			offset = cpu_to_le32(uri_desc->findex);
	}

	return (u8 *)&ha->hablob->fw->data[offset];
}

/* Return the firmware image size in bytes (little-endian encoded). */
static __le32
qla82xx_get_fw_size(struct qla_hw_data *ha)
{
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
		    QLA82XX_URI_FIRMWARE_IDX_OFF);
		if (uri_desc)
			return cpu_to_le32(uri_desc->size);
	}

	/* Non-unified image: size word lives at a fixed offset. */
	return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
}

/* Return a pointer to the firmware image proper within the blob.
 * For a unified ROM image the offset comes from the FW descriptor;
 * otherwise the fixed IMAGE_START offset is used.
 */
static u8 *
qla82xx_get_fw_offs(struct qla_hw_data *ha)
{
	u32 offset = IMAGE_START;
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
		    QLA82XX_URI_FIRMWARE_IDX_OFF);
		if (uri_desc)
			offset = cpu_to_le32(uri_desc->findex);
	}

	return (u8 *)&ha->hablob->fw->data[offset];
}

/* PCI related functions */

/* Format a human-readable PCIe link description ("PCIe (2.5Gb/s xN)")
 * into caller-supplied buffer @str and cache the negotiated link width
 * in ha->link_width.  Returns @str.
 *
 * NOTE(review): pci_find_capability() can return 0 if the PCIe capability
 * is absent; the result is used unchecked as a config-space offset —
 * confirm all supported devices expose the capability.
 */
char *
qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	int pcie_reg;
	struct qla_hw_data *ha = vha->hw;
	char lwstr[6];
	uint16_t lnk;

	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
	/* Negotiated link width: LNKSTA bits [9:4]. */
	ha->link_width = (lnk >> 4) & 0x3f;

	strcpy(str, "PCIe (");
	strcat(str, "2.5Gb/s ");
	snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
	strcat(str, lwstr);
	return str;
}

/* Return the BAR-relative offset of @region: 0 for region 0, or the end
 * of the MSI-X table space (read from config space) for region 1.
 */
int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
{
	unsigned long val = 0;
	u32 control;

	switch (region) {
	case 0:
		val = 0;
		break;
	case 1:
		pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
		val = control + QLA82XX_MSIX_TBL_SPACE;
		break;
	}
	return val;
}


/* Reserve PCI regions and ioremap the register window and (optionally)
 * the doorbell region.  Returns 0 on success, -ENOMEM on any failure.
 */
int
qla82xx_iospace_config(struct qla_hw_data *ha)
{
	uint32_t len = 0;

	if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve selected regions (%s)\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all
accesses. */ 1637 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { 1638 qla_printk(KERN_ERR, ha, 1639 "region #0 not an MMIO resource (%s), aborting\n", 1640 pci_name(ha->pdev)); 1641 goto iospace_error_exit; 1642 } 1643 1644 len = pci_resource_len(ha->pdev, 0); 1645 ha->nx_pcibase = 1646 (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len); 1647 if (!ha->nx_pcibase) { 1648 qla_printk(KERN_ERR, ha, 1649 "cannot remap pcibase MMIO (%s), aborting\n", 1650 pci_name(ha->pdev)); 1651 pci_release_regions(ha->pdev); 1652 goto iospace_error_exit; 1653 } 1654 1655 /* Mapping of IO base pointer */ 1656 ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase + 1657 0xbc000 + (ha->pdev->devfn << 11)); 1658 1659 if (!ql2xdbwr) { 1660 ha->nxdb_wr_ptr = 1661 (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) + 1662 (ha->pdev->devfn << 12)), 4); 1663 if (!ha->nxdb_wr_ptr) { 1664 qla_printk(KERN_ERR, ha, 1665 "cannot remap MMIO (%s), aborting\n", 1666 pci_name(ha->pdev)); 1667 pci_release_regions(ha->pdev); 1668 goto iospace_error_exit; 1669 } 1670 1671 /* Mapping of IO base pointer, 1672 * door bell read and write pointer 1673 */ 1674 ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) + 1675 (ha->pdev->devfn * 8); 1676 } else { 1677 ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ? 1678 QLA82XX_CAMRAM_DB1 : 1679 QLA82XX_CAMRAM_DB2); 1680 } 1681 1682 ha->max_req_queues = ha->max_rsp_queues = 1; 1683 ha->msix_count = ha->max_rsp_queues + 1; 1684 return 0; 1685 1686 iospace_error_exit: 1687 return -ENOMEM; 1688 } 1689 1690 /* GS related functions */ 1691 1692 /* Initialization related functions */ 1693 1694 /** 1695 * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers. 1696 * @ha: HA context 1697 * 1698 * Returns 0 on success. 
 */
int
qla82xx_pci_config(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int ret;

	pci_set_master(ha->pdev);
	/* NOTE(review): pci_set_mwi() result is captured but ignored;
	 * MWI is best-effort here, so the unchecked return appears
	 * intentional — confirm.
	 */
	ret = pci_set_mwi(ha->pdev);
	ha->chip_revision = ha->pdev->revision;
	return 0;
}

/**
 * qla82xx_reset_chip() - Setup ISP82xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
void
qla82xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Only interrupt masking is performed here; the actual chip reset
	 * is driven elsewhere for ISP82xx.
	 */
	ha->isp_ops->disable_intrs(ha);
}

/* Program request/response ring parameters (lengths, DMA addresses,
 * initial in/out pointers) into the init control block and clear the
 * hardware ring index registers.
 */
void qla82xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
	struct init_cb_81xx *icb;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_81xx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
	WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
	WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
}

/* Mark the host offline, ask firmware to stop, and mask interrupts. */
void qla82xx_reset_adapter(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	vha->flags.online = 0;
	qla2x00_try_to_stop_firmware(vha);
	ha->isp_ops->disable_intrs(ha);
}

/* Download bootloader and firmware images from the request_firmware()
 * blob into adapter memory, then kick the boot sequence.
 */
static int
qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
{
	u64 *ptr64;
	u32 i, flashaddr,
size; 1762 __le64 data; 1763 1764 size = (IMAGE_START - BOOTLD_START) / 8; 1765 1766 ptr64 = (u64 *)qla82xx_get_bootld_offset(ha); 1767 flashaddr = BOOTLD_START; 1768 1769 for (i = 0; i < size; i++) { 1770 data = cpu_to_le64(ptr64[i]); 1771 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8)) 1772 return -EIO; 1773 flashaddr += 8; 1774 } 1775 1776 flashaddr = FLASH_ADDR_START; 1777 size = (__force u32)qla82xx_get_fw_size(ha) / 8; 1778 ptr64 = (u64 *)qla82xx_get_fw_offs(ha); 1779 1780 for (i = 0; i < size; i++) { 1781 data = cpu_to_le64(ptr64[i]); 1782 1783 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8)) 1784 return -EIO; 1785 flashaddr += 8; 1786 } 1787 udelay(100); 1788 1789 /* Write a magic value to CAMRAM register 1790 * at a specified offset to indicate 1791 * that all data is written and 1792 * ready for firmware to initialize. 1793 */ 1794 qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC); 1795 1796 read_lock(&ha->hw_lock); 1797 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); 1798 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); 1799 read_unlock(&ha->hw_lock); 1800 return 0; 1801 } 1802 1803 static int 1804 qla82xx_set_product_offset(struct qla_hw_data *ha) 1805 { 1806 struct qla82xx_uri_table_desc *ptab_desc = NULL; 1807 const uint8_t *unirom = ha->hablob->fw->data; 1808 uint32_t i; 1809 __le32 entries; 1810 __le32 flags, file_chiprev, offset; 1811 uint8_t chiprev = ha->chip_revision; 1812 /* Hardcoding mn_present flag for P3P */ 1813 int mn_present = 0; 1814 uint32_t flagbit; 1815 1816 ptab_desc = qla82xx_get_table_desc(unirom, 1817 QLA82XX_URI_DIR_SECT_PRODUCT_TBL); 1818 if (!ptab_desc) 1819 return -1; 1820 1821 entries = cpu_to_le32(ptab_desc->num_entries); 1822 1823 for (i = 0; i < entries; i++) { 1824 offset = cpu_to_le32(ptab_desc->findex) + 1825 (i * cpu_to_le32(ptab_desc->entry_size)); 1826 flags = cpu_to_le32(*((int *)&unirom[offset] + 1827 QLA82XX_URI_FLAGS_OFF)); 1828 file_chiprev = cpu_to_le32(*((int 
*)&unirom[offset] + 1829 QLA82XX_URI_CHIP_REV_OFF)); 1830 1831 flagbit = mn_present ? 1 : 2; 1832 1833 if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) { 1834 ha->file_prd_off = offset; 1835 return 0; 1836 } 1837 } 1838 return -1; 1839 } 1840 1841 int 1842 qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) 1843 { 1844 __le32 val; 1845 uint32_t min_size; 1846 struct qla_hw_data *ha = vha->hw; 1847 const struct firmware *fw = ha->hablob->fw; 1848 1849 ha->fw_type = fw_type; 1850 1851 if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) { 1852 if (qla82xx_set_product_offset(ha)) 1853 return -EINVAL; 1854 1855 min_size = QLA82XX_URI_FW_MIN_SIZE; 1856 } else { 1857 val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]); 1858 if ((__force u32)val != QLA82XX_BDINFO_MAGIC) 1859 return -EINVAL; 1860 1861 min_size = QLA82XX_FW_MIN_SIZE; 1862 } 1863 1864 if (fw->size < min_size) 1865 return -EINVAL; 1866 return 0; 1867 } 1868 1869 static int 1870 qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) 1871 { 1872 u32 val = 0; 1873 int retries = 60; 1874 1875 do { 1876 read_lock(&ha->hw_lock); 1877 val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE); 1878 read_unlock(&ha->hw_lock); 1879 1880 switch (val) { 1881 case PHAN_INITIALIZE_COMPLETE: 1882 case PHAN_INITIALIZE_ACK: 1883 return QLA_SUCCESS; 1884 case PHAN_INITIALIZE_FAILED: 1885 break; 1886 default: 1887 break; 1888 } 1889 qla_printk(KERN_WARNING, ha, 1890 "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n", 1891 val, retries); 1892 1893 msleep(500); 1894 1895 } while (--retries); 1896 1897 qla_printk(KERN_INFO, ha, 1898 "Cmd Peg initialization failed: 0x%x.\n", val); 1899 1900 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); 1901 read_lock(&ha->hw_lock); 1902 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); 1903 read_unlock(&ha->hw_lock); 1904 return QLA_FUNCTION_FAILED; 1905 } 1906 1907 static int 1908 qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) 1909 { 1910 u32 val = 0; 1911 int retries 
= 60;

	/* Poll the receive-peg state for up to 60 * 500ms (30s). */
	do {
		read_lock(&ha->hw_lock);
		val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
		read_unlock(&ha->hw_lock);

		switch (val) {
		case PHAN_INITIALIZE_COMPLETE:
		case PHAN_INITIALIZE_ACK:
			return QLA_SUCCESS;
		case PHAN_INITIALIZE_FAILED:
			break;
		default:
			break;
		}

		qla_printk(KERN_WARNING, ha,
		    "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
		    val, retries);

		msleep(500);

	} while (--retries);

	qla_printk(KERN_INFO, ha,
	    "Rcv Peg initialization failed: 0x%x.\n", val);
	/* Record the failure so other functions see a consistent state. */
	read_lock(&ha->hw_lock);
	qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
	read_unlock(&ha->hw_lock);
	return QLA_FUNCTION_FAILED;
}

/* ISR related functions */

/* Per-PCI-function legacy interrupt target mask registers, indexed by
 * function number.
 * NOTE(review): slots 6 and 7 both use the *_F7 constant and no *_F6
 * constant appears — confirm whether function 6 is unsupported or this
 * is a copy/paste slip.
 */
uint32_t qla82xx_isr_int_target_mask_enable[8] = {
	ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
	ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
	ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
	ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
};

/* Per-PCI-function legacy interrupt status registers (same F6 caveat
 * as above).
 */
uint32_t qla82xx_isr_int_target_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
};

/* Legacy INTx routing table, indexed by ha->portnum. */
static struct qla82xx_legacy_intr_set legacy_intr[] = \
	QLA82XX_LEGACY_INTR_CONFIG;

/*
 * qla82xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;

	/* mb0 arrives via the host_status word; the rest are read from
	 * the out-mailbox registers starting at index 1.
	 */
	wptr = (uint16_t __iomem *)&reg->mailbox_out[1];

	/* Load return mailbox registers.
*/ 1977 ha->flags.mbox_int = 1; 1978 ha->mailbox_out[0] = mb0; 1979 1980 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 1981 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 1982 wptr++; 1983 } 1984 1985 if (ha->mcp) { 1986 DEBUG3_11(printk(KERN_INFO "%s(%ld): " 1987 "Got mailbox completion. cmd=%x.\n", 1988 __func__, vha->host_no, ha->mcp->mb[0])); 1989 } else { 1990 qla_printk(KERN_INFO, ha, 1991 "%s(%ld): MBX pointer ERROR!\n", 1992 __func__, vha->host_no); 1993 } 1994 } 1995 1996 /* 1997 * qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 1998 * @irq: 1999 * @dev_id: SCSI driver HA context 2000 * @regs: 2001 * 2002 * Called by system whenever the host adapter generates an interrupt. 2003 * 2004 * Returns handled flag. 2005 */ 2006 irqreturn_t 2007 qla82xx_intr_handler(int irq, void *dev_id) 2008 { 2009 scsi_qla_host_t *vha; 2010 struct qla_hw_data *ha; 2011 struct rsp_que *rsp; 2012 struct device_reg_82xx __iomem *reg; 2013 int status = 0, status1 = 0; 2014 unsigned long flags; 2015 unsigned long iter; 2016 uint32_t stat; 2017 uint16_t mb[4]; 2018 2019 rsp = (struct rsp_que *) dev_id; 2020 if (!rsp) { 2021 printk(KERN_INFO 2022 "%s(): NULL response queue pointer\n", __func__); 2023 return IRQ_NONE; 2024 } 2025 ha = rsp->hw; 2026 2027 if (!ha->flags.msi_enabled) { 2028 status = qla82xx_rd_32(ha, ISR_INT_VECTOR); 2029 if (!(status & ha->nx_legacy_intr.int_vec_bit)) 2030 return IRQ_NONE; 2031 2032 status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG); 2033 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1)) 2034 return IRQ_NONE; 2035 } 2036 2037 /* clear the interrupt */ 2038 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); 2039 2040 /* read twice to ensure write is flushed */ 2041 qla82xx_rd_32(ha, ISR_INT_VECTOR); 2042 qla82xx_rd_32(ha, ISR_INT_VECTOR); 2043 2044 reg = &ha->iobase->isp82; 2045 2046 spin_lock_irqsave(&ha->hardware_lock, flags); 2047 vha = pci_get_drvdata(ha->pdev); 2048 for (iter = 1; iter--; ) { 2049 2050 if 
(RD_REG_DWORD(®->host_int)) { 2051 stat = RD_REG_DWORD(®->host_status); 2052 2053 switch (stat & 0xff) { 2054 case 0x1: 2055 case 0x2: 2056 case 0x10: 2057 case 0x11: 2058 qla82xx_mbx_completion(vha, MSW(stat)); 2059 status |= MBX_INTERRUPT; 2060 break; 2061 case 0x12: 2062 mb[0] = MSW(stat); 2063 mb[1] = RD_REG_WORD(®->mailbox_out[1]); 2064 mb[2] = RD_REG_WORD(®->mailbox_out[2]); 2065 mb[3] = RD_REG_WORD(®->mailbox_out[3]); 2066 qla2x00_async_event(vha, rsp, mb); 2067 break; 2068 case 0x13: 2069 qla24xx_process_response_queue(vha, rsp); 2070 break; 2071 default: 2072 DEBUG2(printk("scsi(%ld): " 2073 " Unrecognized interrupt type (%d).\n", 2074 vha->host_no, stat & 0xff)); 2075 break; 2076 } 2077 } 2078 WRT_REG_DWORD(®->host_int, 0); 2079 } 2080 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2081 if (!ha->flags.msi_enabled) 2082 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); 2083 2084 #ifdef QL_DEBUG_LEVEL_17 2085 if (!irq && ha->flags.eeh_busy) 2086 qla_printk(KERN_WARNING, ha, 2087 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", 2088 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2089 #endif 2090 2091 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2092 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2093 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2094 complete(&ha->mbx_intr_comp); 2095 } 2096 return IRQ_HANDLED; 2097 } 2098 2099 irqreturn_t 2100 qla82xx_msix_default(int irq, void *dev_id) 2101 { 2102 scsi_qla_host_t *vha; 2103 struct qla_hw_data *ha; 2104 struct rsp_que *rsp; 2105 struct device_reg_82xx __iomem *reg; 2106 int status = 0; 2107 unsigned long flags; 2108 uint32_t stat; 2109 uint16_t mb[4]; 2110 2111 rsp = (struct rsp_que *) dev_id; 2112 if (!rsp) { 2113 printk(KERN_INFO 2114 "%s(): NULL response queue pointer\n", __func__); 2115 return IRQ_NONE; 2116 } 2117 ha = rsp->hw; 2118 2119 reg = &ha->iobase->isp82; 2120 2121 spin_lock_irqsave(&ha->hardware_lock, flags); 2122 vha = pci_get_drvdata(ha->pdev); 2123 do { 
2124 if (RD_REG_DWORD(®->host_int)) { 2125 stat = RD_REG_DWORD(®->host_status); 2126 2127 switch (stat & 0xff) { 2128 case 0x1: 2129 case 0x2: 2130 case 0x10: 2131 case 0x11: 2132 qla82xx_mbx_completion(vha, MSW(stat)); 2133 status |= MBX_INTERRUPT; 2134 break; 2135 case 0x12: 2136 mb[0] = MSW(stat); 2137 mb[1] = RD_REG_WORD(®->mailbox_out[1]); 2138 mb[2] = RD_REG_WORD(®->mailbox_out[2]); 2139 mb[3] = RD_REG_WORD(®->mailbox_out[3]); 2140 qla2x00_async_event(vha, rsp, mb); 2141 break; 2142 case 0x13: 2143 qla24xx_process_response_queue(vha, rsp); 2144 break; 2145 default: 2146 DEBUG2(printk("scsi(%ld): " 2147 " Unrecognized interrupt type (%d).\n", 2148 vha->host_no, stat & 0xff)); 2149 break; 2150 } 2151 } 2152 WRT_REG_DWORD(®->host_int, 0); 2153 } while (0); 2154 2155 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2156 2157 #ifdef QL_DEBUG_LEVEL_17 2158 if (!irq && ha->flags.eeh_busy) 2159 qla_printk(KERN_WARNING, ha, 2160 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", 2161 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2162 #endif 2163 2164 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2165 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2166 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2167 complete(&ha->mbx_intr_comp); 2168 } 2169 return IRQ_HANDLED; 2170 } 2171 2172 irqreturn_t 2173 qla82xx_msix_rsp_q(int irq, void *dev_id) 2174 { 2175 scsi_qla_host_t *vha; 2176 struct qla_hw_data *ha; 2177 struct rsp_que *rsp; 2178 struct device_reg_82xx __iomem *reg; 2179 2180 rsp = (struct rsp_que *) dev_id; 2181 if (!rsp) { 2182 printk(KERN_INFO 2183 "%s(): NULL response queue pointer\n", __func__); 2184 return IRQ_NONE; 2185 } 2186 2187 ha = rsp->hw; 2188 reg = &ha->iobase->isp82; 2189 spin_lock_irq(&ha->hardware_lock); 2190 vha = pci_get_drvdata(ha->pdev); 2191 qla24xx_process_response_queue(vha, rsp); 2192 WRT_REG_DWORD(®->host_int, 0); 2193 spin_unlock_irq(&ha->hardware_lock); 2194 return IRQ_HANDLED; 2195 } 2196 2197 void 2198 
qla82xx_poll(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	int status = 0;
	uint32_t stat;
	uint16_t mb[4];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer\n", __func__);
		return;
	}
	ha = rsp->hw;

	/* Single polled pass over the same event sources as the real ISR:
	 * mailbox completion (0x1/0x2/0x10/0x11), async event (0x12) and
	 * response queue (0x13), then ack host_int.
	 */
	reg = &ha->iobase->isp82;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);

	if (RD_REG_DWORD(&reg->host_int)) {
		stat = RD_REG_DWORD(&reg->host_status);
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla82xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
			mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
			mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    vha->host_no, stat & 0xff));
			break;
		}
	}
	WRT_REG_DWORD(&reg->host_int, 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/* Enable firmware-side interrupts via mailbox, then unmask the legacy
 * interrupt target register and record interrupts as on.
 */
void
qla82xx_enable_intrs(struct qla_hw_data *ha)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	qla82xx_mbx_intr_enable(vha);
	spin_lock_irq(&ha->hardware_lock);
	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
	spin_unlock_irq(&ha->hardware_lock);
	ha->interrupts_on = 1;
}

/* Disable firmware-side interrupts via mailbox, then mask the legacy
 * interrupt target register.
 */
void
qla82xx_disable_intrs(struct qla_hw_data *ha)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	qla82xx_mbx_intr_disable(vha);
	spin_lock_irq(&ha->hardware_lock);
	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
spin_unlock_irq(&ha->hardware_lock); 2271 ha->interrupts_on = 0; 2272 } 2273 2274 void qla82xx_init_flags(struct qla_hw_data *ha) 2275 { 2276 struct qla82xx_legacy_intr_set *nx_legacy_intr; 2277 2278 /* ISP 8021 initializations */ 2279 rwlock_init(&ha->hw_lock); 2280 ha->qdr_sn_window = -1; 2281 ha->ddr_mn_window = -1; 2282 ha->curr_window = 255; 2283 ha->portnum = PCI_FUNC(ha->pdev->devfn); 2284 nx_legacy_intr = &legacy_intr[ha->portnum]; 2285 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 2286 ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg; 2287 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 2288 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 2289 } 2290 2291 inline void 2292 qla82xx_set_drv_active(scsi_qla_host_t *vha) 2293 { 2294 uint32_t drv_active; 2295 struct qla_hw_data *ha = vha->hw; 2296 2297 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2298 2299 /* If reset value is all FF's, initialize DRV_ACTIVE */ 2300 if (drv_active == 0xffffffff) { 2301 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, 2302 QLA82XX_DRV_NOT_ACTIVE); 2303 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2304 } 2305 drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); 2306 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 2307 } 2308 2309 inline void 2310 qla82xx_clear_drv_active(struct qla_hw_data *ha) 2311 { 2312 uint32_t drv_active; 2313 2314 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2315 drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); 2316 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 2317 } 2318 2319 static inline int 2320 qla82xx_need_reset(struct qla_hw_data *ha) 2321 { 2322 uint32_t drv_state; 2323 int rval; 2324 2325 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2326 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2327 return rval; 2328 } 2329 2330 static inline void 2331 qla82xx_set_rst_ready(struct qla_hw_data *ha) 2332 { 2333 
uint32_t drv_state; 2334 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2335 2336 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2337 2338 /* If reset value is all FF's, initialize DRV_STATE */ 2339 if (drv_state == 0xffffffff) { 2340 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY); 2341 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2342 } 2343 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2344 qla_printk(KERN_INFO, ha, 2345 "%s(%ld):drv_state = 0x%x\n", 2346 __func__, vha->host_no, drv_state); 2347 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2348 } 2349 2350 static inline void 2351 qla82xx_clear_rst_ready(struct qla_hw_data *ha) 2352 { 2353 uint32_t drv_state; 2354 2355 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2356 drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2357 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2358 } 2359 2360 static inline void 2361 qla82xx_set_qsnt_ready(struct qla_hw_data *ha) 2362 { 2363 uint32_t qsnt_state; 2364 2365 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2366 qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); 2367 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); 2368 } 2369 2370 void 2371 qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha) 2372 { 2373 struct qla_hw_data *ha = vha->hw; 2374 uint32_t qsnt_state; 2375 2376 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2377 qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); 2378 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); 2379 } 2380 2381 static int 2382 qla82xx_load_fw(scsi_qla_host_t *vha) 2383 { 2384 int rst; 2385 struct fw_blob *blob; 2386 struct qla_hw_data *ha = vha->hw; 2387 2388 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { 2389 qla_printk(KERN_ERR, ha, 2390 "%s: Error during CRB Initialization\n", __func__); 2391 return QLA_FUNCTION_FAILED; 2392 } 2393 udelay(500); 2394 2395 /* Bring QM and CAMRAM out of reset */ 2396 rst = 
qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); 2397 rst &= ~((1 << 28) | (1 << 24)); 2398 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst); 2399 2400 /* 2401 * FW Load priority: 2402 * 1) Operational firmware residing in flash. 2403 * 2) Firmware via request-firmware interface (.bin file). 2404 */ 2405 if (ql2xfwloadbin == 2) 2406 goto try_blob_fw; 2407 2408 qla_printk(KERN_INFO, ha, 2409 "Attempting to load firmware from flash\n"); 2410 2411 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2412 qla_printk(KERN_ERR, ha, 2413 "Firmware loaded successfully from flash\n"); 2414 return QLA_SUCCESS; 2415 } 2416 try_blob_fw: 2417 qla_printk(KERN_INFO, ha, 2418 "Attempting to load firmware from blob\n"); 2419 2420 /* Load firmware blob. */ 2421 blob = ha->hablob = qla2x00_request_firmware(vha); 2422 if (!blob) { 2423 qla_printk(KERN_ERR, ha, 2424 "Firmware image not present.\n"); 2425 goto fw_load_failed; 2426 } 2427 2428 /* Validating firmware blob */ 2429 if (qla82xx_validate_firmware_blob(vha, 2430 QLA82XX_FLASH_ROMIMAGE)) { 2431 /* Fallback to URI format */ 2432 if (qla82xx_validate_firmware_blob(vha, 2433 QLA82XX_UNIFIED_ROMIMAGE)) { 2434 qla_printk(KERN_ERR, ha, 2435 "No valid firmware image found!!!"); 2436 return QLA_FUNCTION_FAILED; 2437 } 2438 } 2439 2440 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { 2441 qla_printk(KERN_ERR, ha, 2442 "%s: Firmware loaded successfully " 2443 " from binary blob\n", __func__); 2444 return QLA_SUCCESS; 2445 } else { 2446 qla_printk(KERN_ERR, ha, 2447 "Firmware load failed from binary blob\n"); 2448 blob->fw = NULL; 2449 blob = NULL; 2450 goto fw_load_failed; 2451 } 2452 return QLA_SUCCESS; 2453 2454 fw_load_failed: 2455 return QLA_FUNCTION_FAILED; 2456 } 2457 2458 int 2459 qla82xx_start_firmware(scsi_qla_host_t *vha) 2460 { 2461 int pcie_cap; 2462 uint16_t lnk; 2463 struct qla_hw_data *ha = vha->hw; 2464 2465 /* scrub dma mask expansion register */ 2466 qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE); 2467 
2468 /* Put both the PEG CMD and RCV PEG to default state 2469 * of 0 before resetting the hardware 2470 */ 2471 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); 2472 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0); 2473 2474 /* Overwrite stale initialization register values */ 2475 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); 2476 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); 2477 2478 if (qla82xx_load_fw(vha) != QLA_SUCCESS) { 2479 qla_printk(KERN_INFO, ha, 2480 "%s: Error trying to start fw!\n", __func__); 2481 return QLA_FUNCTION_FAILED; 2482 } 2483 2484 /* Handshake with the card before we register the devices. */ 2485 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) { 2486 qla_printk(KERN_INFO, ha, 2487 "%s: Error during card handshake!\n", __func__); 2488 return QLA_FUNCTION_FAILED; 2489 } 2490 2491 /* Negotiated Link width */ 2492 pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); 2493 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); 2494 ha->link_width = (lnk >> 4) & 0x3f; 2495 2496 /* Synchronize with Receive peg */ 2497 return qla82xx_check_rcvpeg_state(ha); 2498 } 2499 2500 static inline int 2501 qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, 2502 uint16_t tot_dsds) 2503 { 2504 uint32_t *cur_dsd = NULL; 2505 scsi_qla_host_t *vha; 2506 struct qla_hw_data *ha; 2507 struct scsi_cmnd *cmd; 2508 struct scatterlist *cur_seg; 2509 uint32_t *dsd_seg; 2510 void *next_dsd; 2511 uint8_t avail_dsds; 2512 uint8_t first_iocb = 1; 2513 uint32_t dsd_list_len; 2514 struct dsd_dma *dsd_ptr; 2515 struct ct6_dsd *ctx; 2516 2517 cmd = sp->cmd; 2518 2519 /* Update entry type to indicate Command Type 3 IOCB */ 2520 *((uint32_t *)(&cmd_pkt->entry_type)) = 2521 __constant_cpu_to_le32(COMMAND_TYPE_6); 2522 2523 /* No data transfer */ 2524 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 2525 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 2526 return 0; 2527 } 2528 2529 vha = sp->fcport->vha; 2530 ha = vha->hw; 2531 2532 /* Set 
transfer direction */ 2533 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 2534 cmd_pkt->control_flags = 2535 __constant_cpu_to_le16(CF_WRITE_DATA); 2536 ha->qla_stats.output_bytes += scsi_bufflen(cmd); 2537 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 2538 cmd_pkt->control_flags = 2539 __constant_cpu_to_le16(CF_READ_DATA); 2540 ha->qla_stats.input_bytes += scsi_bufflen(cmd); 2541 } 2542 2543 cur_seg = scsi_sglist(cmd); 2544 ctx = sp->ctx; 2545 2546 while (tot_dsds) { 2547 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? 2548 QLA_DSDS_PER_IOCB : tot_dsds; 2549 tot_dsds -= avail_dsds; 2550 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE; 2551 2552 dsd_ptr = list_first_entry(&ha->gbl_dsd_list, 2553 struct dsd_dma, list); 2554 next_dsd = dsd_ptr->dsd_addr; 2555 list_del(&dsd_ptr->list); 2556 ha->gbl_dsd_avail--; 2557 list_add_tail(&dsd_ptr->list, &ctx->dsd_list); 2558 ctx->dsd_use_cnt++; 2559 ha->gbl_dsd_inuse++; 2560 2561 if (first_iocb) { 2562 first_iocb = 0; 2563 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; 2564 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 2565 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 2566 cmd_pkt->fcp_data_dseg_len = dsd_list_len; 2567 } else { 2568 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 2569 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 2570 *cur_dsd++ = dsd_list_len; 2571 } 2572 cur_dsd = (uint32_t *)next_dsd; 2573 while (avail_dsds) { 2574 dma_addr_t sle_dma; 2575 2576 sle_dma = sg_dma_address(cur_seg); 2577 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 2578 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 2579 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg)); 2580 cur_seg = sg_next(cur_seg); 2581 avail_dsds--; 2582 } 2583 } 2584 2585 /* Null termination */ 2586 *cur_dsd++ = 0; 2587 *cur_dsd++ = 0; 2588 *cur_dsd++ = 0; 2589 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE; 2590 return 0; 2591 } 2592 2593 /* 2594 * qla82xx_calc_dsd_lists() - Determine number of DSD list required 2595 * for Command 
Type 6. 2596 * 2597 * @dsds: number of data segment decriptors needed 2598 * 2599 * Returns the number of dsd list needed to store @dsds. 2600 */ 2601 inline uint16_t 2602 qla82xx_calc_dsd_lists(uint16_t dsds) 2603 { 2604 uint16_t dsd_lists = 0; 2605 2606 dsd_lists = (dsds/QLA_DSDS_PER_IOCB); 2607 if (dsds % QLA_DSDS_PER_IOCB) 2608 dsd_lists++; 2609 return dsd_lists; 2610 } 2611 2612 /* 2613 * qla82xx_start_scsi() - Send a SCSI command to the ISP 2614 * @sp: command to send to the ISP 2615 * 2616 * Returns non-zero if a failure occurred, else zero. 2617 */ 2618 int 2619 qla82xx_start_scsi(srb_t *sp) 2620 { 2621 int ret, nseg; 2622 unsigned long flags; 2623 struct scsi_cmnd *cmd; 2624 uint32_t *clr_ptr; 2625 uint32_t index; 2626 uint32_t handle; 2627 uint16_t cnt; 2628 uint16_t req_cnt; 2629 uint16_t tot_dsds; 2630 struct device_reg_82xx __iomem *reg; 2631 uint32_t dbval; 2632 uint32_t *fcp_dl; 2633 uint8_t additional_cdb_len; 2634 struct ct6_dsd *ctx; 2635 struct scsi_qla_host *vha = sp->fcport->vha; 2636 struct qla_hw_data *ha = vha->hw; 2637 struct req_que *req = NULL; 2638 struct rsp_que *rsp = NULL; 2639 char tag[2]; 2640 2641 /* Setup device pointers. */ 2642 ret = 0; 2643 reg = &ha->iobase->isp82; 2644 cmd = sp->cmd; 2645 req = vha->req; 2646 rsp = ha->rsp_q_map[0]; 2647 2648 /* So we know we haven't pci_map'ed anything yet */ 2649 tot_dsds = 0; 2650 2651 dbval = 0x04 | (ha->portnum << 5); 2652 2653 /* Send marker if required */ 2654 if (vha->marker_needed != 0) { 2655 if (qla2x00_marker(vha, req, 2656 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 2657 return QLA_FUNCTION_FAILED; 2658 vha->marker_needed = 0; 2659 } 2660 2661 /* Acquire ring specific lock */ 2662 spin_lock_irqsave(&ha->hardware_lock, flags); 2663 2664 /* Check for room in outstanding command list. 
*/ 2665 handle = req->current_outstanding_cmd; 2666 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 2667 handle++; 2668 if (handle == MAX_OUTSTANDING_COMMANDS) 2669 handle = 1; 2670 if (!req->outstanding_cmds[handle]) 2671 break; 2672 } 2673 if (index == MAX_OUTSTANDING_COMMANDS) 2674 goto queuing_error; 2675 2676 /* Map the sg table so we have an accurate count of sg entries needed */ 2677 if (scsi_sg_count(cmd)) { 2678 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 2679 scsi_sg_count(cmd), cmd->sc_data_direction); 2680 if (unlikely(!nseg)) 2681 goto queuing_error; 2682 } else 2683 nseg = 0; 2684 2685 tot_dsds = nseg; 2686 2687 if (tot_dsds > ql2xshiftctondsd) { 2688 struct cmd_type_6 *cmd_pkt; 2689 uint16_t more_dsd_lists = 0; 2690 struct dsd_dma *dsd_ptr; 2691 uint16_t i; 2692 2693 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds); 2694 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) 2695 goto queuing_error; 2696 2697 if (more_dsd_lists <= ha->gbl_dsd_avail) 2698 goto sufficient_dsds; 2699 else 2700 more_dsd_lists -= ha->gbl_dsd_avail; 2701 2702 for (i = 0; i < more_dsd_lists; i++) { 2703 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 2704 if (!dsd_ptr) 2705 goto queuing_error; 2706 2707 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 2708 GFP_ATOMIC, &dsd_ptr->dsd_list_dma); 2709 if (!dsd_ptr->dsd_addr) { 2710 kfree(dsd_ptr); 2711 goto queuing_error; 2712 } 2713 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); 2714 ha->gbl_dsd_avail++; 2715 } 2716 2717 sufficient_dsds: 2718 req_cnt = 1; 2719 2720 if (req->cnt < (req_cnt + 2)) { 2721 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 2722 ®->req_q_out[0]); 2723 if (req->ring_index < cnt) 2724 req->cnt = cnt - req->ring_index; 2725 else 2726 req->cnt = req->length - 2727 (req->ring_index - cnt); 2728 } 2729 2730 if (req->cnt < (req_cnt + 2)) 2731 goto queuing_error; 2732 2733 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2734 if (!sp->ctx) { 2735 DEBUG(printk(KERN_INFO 
2736 "%s(%ld): failed to allocate" 2737 " ctx.\n", __func__, vha->host_no)); 2738 goto queuing_error; 2739 } 2740 memset(ctx, 0, sizeof(struct ct6_dsd)); 2741 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 2742 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 2743 if (!ctx->fcp_cmnd) { 2744 DEBUG2_3(printk("%s(%ld): failed to allocate" 2745 " fcp_cmnd.\n", __func__, vha->host_no)); 2746 goto queuing_error_fcp_cmnd; 2747 } 2748 2749 /* Initialize the DSD list and dma handle */ 2750 INIT_LIST_HEAD(&ctx->dsd_list); 2751 ctx->dsd_use_cnt = 0; 2752 2753 if (cmd->cmd_len > 16) { 2754 additional_cdb_len = cmd->cmd_len - 16; 2755 if ((cmd->cmd_len % 4) != 0) { 2756 /* SCSI command bigger than 16 bytes must be 2757 * multiple of 4 2758 */ 2759 goto queuing_error_fcp_cmnd; 2760 } 2761 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; 2762 } else { 2763 additional_cdb_len = 0; 2764 ctx->fcp_cmnd_len = 12 + 16 + 4; 2765 } 2766 2767 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; 2768 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2769 2770 /* Zero out remaining portion of packet. */ 2771 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 2772 clr_ptr = (uint32_t *)cmd_pkt + 2; 2773 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2774 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2775 2776 /* Set NPORT-ID and LUN number*/ 2777 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2778 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2779 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2780 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2781 cmd_pkt->vp_index = sp->fcport->vp_idx; 2782 2783 /* Build IOCB segments */ 2784 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 2785 goto queuing_error_fcp_cmnd; 2786 2787 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2788 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2789 2790 /* 2791 * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 
2792 */ 2793 if (scsi_populate_tag_msg(cmd, tag)) { 2794 switch (tag[0]) { 2795 case HEAD_OF_QUEUE_TAG: 2796 ctx->fcp_cmnd->task_attribute = 2797 TSK_HEAD_OF_QUEUE; 2798 break; 2799 case ORDERED_QUEUE_TAG: 2800 ctx->fcp_cmnd->task_attribute = 2801 TSK_ORDERED; 2802 break; 2803 } 2804 } 2805 2806 /* build FCP_CMND IU */ 2807 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2808 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); 2809 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 2810 2811 if (cmd->sc_data_direction == DMA_TO_DEVICE) 2812 ctx->fcp_cmnd->additional_cdb_len |= 1; 2813 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 2814 ctx->fcp_cmnd->additional_cdb_len |= 2; 2815 2816 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 2817 2818 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + 2819 additional_cdb_len); 2820 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); 2821 2822 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); 2823 cmd_pkt->fcp_cmnd_dseg_address[0] = 2824 cpu_to_le32(LSD(ctx->fcp_cmnd_dma)); 2825 cmd_pkt->fcp_cmnd_dseg_address[1] = 2826 cpu_to_le32(MSD(ctx->fcp_cmnd_dma)); 2827 2828 sp->flags |= SRB_FCP_CMND_DMA_VALID; 2829 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2830 /* Set total data segment count. 
*/ 2831 cmd_pkt->entry_count = (uint8_t)req_cnt; 2832 /* Specify response queue number where 2833 * completion should happen 2834 */ 2835 cmd_pkt->entry_status = (uint8_t) rsp->id; 2836 } else { 2837 struct cmd_type_7 *cmd_pkt; 2838 req_cnt = qla24xx_calc_iocbs(tot_dsds); 2839 if (req->cnt < (req_cnt + 2)) { 2840 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 2841 ®->req_q_out[0]); 2842 if (req->ring_index < cnt) 2843 req->cnt = cnt - req->ring_index; 2844 else 2845 req->cnt = req->length - 2846 (req->ring_index - cnt); 2847 } 2848 if (req->cnt < (req_cnt + 2)) 2849 goto queuing_error; 2850 2851 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 2852 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2853 2854 /* Zero out remaining portion of packet. */ 2855 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 2856 clr_ptr = (uint32_t *)cmd_pkt + 2; 2857 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2858 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2859 2860 /* Set NPORT-ID and LUN number*/ 2861 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2862 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2863 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2864 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2865 cmd_pkt->vp_index = sp->fcport->vp_idx; 2866 2867 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2868 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 2869 sizeof(cmd_pkt->lun)); 2870 2871 /* 2872 * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 2873 */ 2874 if (scsi_populate_tag_msg(cmd, tag)) { 2875 switch (tag[0]) { 2876 case HEAD_OF_QUEUE_TAG: 2877 cmd_pkt->task = TSK_HEAD_OF_QUEUE; 2878 break; 2879 case ORDERED_QUEUE_TAG: 2880 cmd_pkt->task = TSK_ORDERED; 2881 break; 2882 } 2883 } 2884 2885 /* Load SCSI command packet. 
*/ 2886 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 2887 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 2888 2889 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2890 2891 /* Build IOCB segments */ 2892 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); 2893 2894 /* Set total data segment count. */ 2895 cmd_pkt->entry_count = (uint8_t)req_cnt; 2896 /* Specify response queue number where 2897 * completion should happen. 2898 */ 2899 cmd_pkt->entry_status = (uint8_t) rsp->id; 2900 2901 } 2902 /* Build command packet. */ 2903 req->current_outstanding_cmd = handle; 2904 req->outstanding_cmds[handle] = sp; 2905 sp->handle = handle; 2906 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2907 req->cnt -= req_cnt; 2908 wmb(); 2909 2910 /* Adjust ring index. */ 2911 req->ring_index++; 2912 if (req->ring_index == req->length) { 2913 req->ring_index = 0; 2914 req->ring_ptr = req->ring; 2915 } else 2916 req->ring_ptr++; 2917 2918 sp->flags |= SRB_DMA_VALID; 2919 2920 /* Set chip new ring index. */ 2921 /* write, read and verify logic */ 2922 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 2923 if (ql2xdbwr) 2924 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); 2925 else { 2926 WRT_REG_DWORD( 2927 (unsigned long __iomem *)ha->nxdb_wr_ptr, 2928 dbval); 2929 wmb(); 2930 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { 2931 WRT_REG_DWORD( 2932 (unsigned long __iomem *)ha->nxdb_wr_ptr, 2933 dbval); 2934 wmb(); 2935 } 2936 } 2937 2938 /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ 2939 if (vha->flags.process_response_queue && 2940 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2941 qla24xx_process_response_queue(vha, rsp); 2942 2943 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2944 return QLA_SUCCESS; 2945 2946 queuing_error_fcp_cmnd: 2947 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); 2948 queuing_error: 2949 if (tot_dsds) 2950 scsi_dma_unmap(cmd); 2951 2952 if (sp->ctx) { 2953 mempool_free(sp->ctx, ha->ctx_mempool); 2954 sp->ctx = NULL; 2955 } 2956 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2957 2958 return QLA_FUNCTION_FAILED; 2959 } 2960 2961 static uint32_t * 2962 qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, 2963 uint32_t length) 2964 { 2965 uint32_t i; 2966 uint32_t val; 2967 struct qla_hw_data *ha = vha->hw; 2968 2969 /* Dword reads to flash. */ 2970 for (i = 0; i < length/4; i++, faddr += 4) { 2971 if (qla82xx_rom_fast_read(ha, faddr, &val)) { 2972 qla_printk(KERN_WARNING, ha, 2973 "Do ROM fast read failed\n"); 2974 goto done_read; 2975 } 2976 dwptr[i] = __constant_cpu_to_le32(val); 2977 } 2978 done_read: 2979 return dwptr; 2980 } 2981 2982 static int 2983 qla82xx_unprotect_flash(struct qla_hw_data *ha) 2984 { 2985 int ret; 2986 uint32_t val; 2987 2988 ret = ql82xx_rom_lock_d(ha); 2989 if (ret < 0) { 2990 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 2991 return ret; 2992 } 2993 2994 ret = qla82xx_read_status_reg(ha, &val); 2995 if (ret < 0) 2996 goto done_unprotect; 2997 2998 val &= ~(BLOCK_PROTECT_BITS << 2); 2999 ret = qla82xx_write_status_reg(ha, val); 3000 if (ret < 0) { 3001 val |= (BLOCK_PROTECT_BITS << 2); 3002 qla82xx_write_status_reg(ha, val); 3003 } 3004 3005 if (qla82xx_write_disable_flash(ha) != 0) 3006 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3007 3008 done_unprotect: 3009 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 3010 return ret; 3011 } 3012 3013 static int 3014 qla82xx_protect_flash(struct qla_hw_data *ha) 
{
	int ret;
	uint32_t val;

	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
		return ret;
	}

	ret = qla82xx_read_status_reg(ha, &val);
	if (ret < 0)
		goto done_protect;

	val |= (BLOCK_PROTECT_BITS << 2);
	/* LOCK all sectors */
	ret = qla82xx_write_status_reg(ha, val);
	if (ret < 0)
		qla_printk(KERN_WARNING, ha, "Write status register failed\n");

	if (qla82xx_write_disable_flash(ha) != 0)
		qla_printk(KERN_WARNING, ha, "Write disable failed\n");
done_protect:
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
	return ret;
}

/*
 * qla82xx_erase_sector() - Erase one flash sector at @addr.
 *
 * Issues the M25P sector-erase instruction under the ROM semaphore and
 * waits for completion.  Returns 0 on success, negative on failure.
 */
static int
qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
{
	int ret = 0;

	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
		return ret;
	}

	qla82xx_flash_set_write_enable(ha);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);

	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		ret = -1;
		goto done;
	}
	ret = qla82xx_flash_wait_write_finish(ha);
done:
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
	return ret;
}

/*
 * Address and length are byte address
 */
uint8_t *
qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
	uint32_t offset, uint32_t length)
{
	/* Quiesce the host while reading the option ROM region. */
	scsi_block_requests(vha->host);
	qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
	scsi_unblock_requests(vha->host);
	return buf;
}

/*
 * qla82xx_write_flash_data() - Program @dwords dwords at flash byte
 * address @faddr, erasing sectors as their boundaries are crossed.
 *
 * NOTE(review): page_mode is hard-coded to 0, so the burst-write branch
 * below is currently dead code -- presumably a placeholder for
 * page-mode-capable parts; confirm before removing.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int
qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
	uint32_t faddr, uint32_t dwords)
{
	int ret;
	uint32_t liter;
	uint32_t sec_mask, rest_addr;
	dma_addr_t optrom_dma;
	void *optrom = NULL;
	int page_mode = 0;
	struct qla_hw_data *ha = vha->hw;

	ret = -1;

	/* Prepare burst-capable write on supported ISPs. */
	if (page_mode && !(faddr & 0xfff) &&
	    dwords > OPTROM_BURST_DWORDS) {
		optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
		    &optrom_dma, GFP_KERNEL);
		if (!optrom) {
			qla_printk(KERN_DEBUG, ha,
			    "Unable to allocate memory for optrom "
			    "burst write (%x KB).\n",
			    OPTROM_BURST_SIZE / 1024);
		}
	}

	rest_addr = ha->fdt_block_size - 1;
	sec_mask = ~rest_addr;

	ret = qla82xx_unprotect_flash(ha);
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to unprotect flash for update.\n");
		goto write_done;
	}

	for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
		/* Are we at the beginning of a sector? */
		if ((faddr & rest_addr) == 0) {

			ret = qla82xx_erase_sector(ha, faddr);
			if (ret) {
				DEBUG9(qla_printk(KERN_ERR, ha,
				    "Unable to erase sector: "
				    "address=%x.\n", faddr));
				break;
			}
		}

		/* Go with burst-write. */
		if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
			/* Copy data to DMA'ble buffer. */
			memcpy(optrom, dwptr, OPTROM_BURST_SIZE);

			ret = qla2x00_load_ram(vha, optrom_dma,
			    (ha->flash_data_off | faddr),
			    OPTROM_BURST_DWORDS);
			if (ret != QLA_SUCCESS) {
				qla_printk(KERN_WARNING, ha,
				    "Unable to burst-write optrom segment "
				    "(%x/%x/%llx).\n", ret,
				    (ha->flash_data_off | faddr),
				    (unsigned long long)optrom_dma);
				qla_printk(KERN_WARNING, ha,
				    "Reverting to slow-write.\n");

				dma_free_coherent(&ha->pdev->dev,
				    OPTROM_BURST_SIZE, optrom, optrom_dma);
				optrom = NULL;
			} else {
				/* Burst consumed a whole chunk; skip ahead. */
				liter += OPTROM_BURST_DWORDS - 1;
				faddr += OPTROM_BURST_DWORDS - 1;
				dwptr += OPTROM_BURST_DWORDS - 1;
				continue;
			}
		}

		ret = qla82xx_write_flash_dword(ha, faddr,
		    cpu_to_le32(*dwptr));
		if (ret) {
			DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program"
			    "flash address=%x data=%x.\n", __func__,
			    ha->host_no, faddr, *dwptr));
			break;
		}
	}

	ret = qla82xx_protect_flash(ha);
	if (ret)
		qla_printk(KERN_WARNING, ha,
		    "Unable to protect flash after update.\n");
write_done:
	if (optrom)
		dma_free_coherent(&ha->pdev->dev,
		    OPTROM_BURST_SIZE, optrom, optrom_dma);
	return ret;
}

/*
 * qla82xx_write_optrom_data() - Program option ROM data; @offset and
 * @length are byte-addressed.  Returns QLA_SUCCESS/QLA_FUNCTION_FAILED.
 */
int
qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
	uint32_t offset, uint32_t length)
{
	int rval;

	/* Suspend HBA.
 */
	scsi_block_requests(vha->host);
	rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
		length >> 2);
	scsi_unblock_requests(vha->host);

	/* Convert return ISP82xx to generic */
	if (rval)
		rval = QLA_FUNCTION_FAILED;
	else
		rval = QLA_SUCCESS;
	return rval;
}

/*
 * qla82xx_start_iocbs() - Advance the request ring and ring the
 * ISP82xx doorbell using the write/read-back-verify protocol.
 *
 * Caller is expected to hold hardware_lock (doorbell and ring index
 * are shared request-queue state).
 */
void
qla82xx_start_iocbs(srb_t *sp)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* NOTE(review): reg is assigned but not used below. */
	reg = &ha->iobase->isp82;
	dbval = 0x04 | (ha->portnum << 5);

	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
		wmb();
		/* Re-post until the chip echoes the doorbell value back. */
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
				dbval);
			wmb();
		}
	}
}

/*
 * qla82xx_rom_lock_recovery() - Forcibly release the ROM semaphore,
 * used when a previous holder died without unlocking.
 */
void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
{
	if (qla82xx_rom_lock(ha))
		/* Someone else is holding the lock. */
		qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");

	/*
	 * Either we got the lock, or someone
	 * else died while holding it.
	 * In either case, unlock.
	 */
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
}

/*
 * qla82xx_device_bootstrap
 *     Initialize device, set DEV_READY, start fw
 *
 * Note:
 *      IDC lock must be held upon entry
 *
 * Return:
 *      Success : 0
 *      Failed  : 1
 */
static int
qla82xx_device_bootstrap(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int i, timeout;
	uint32_t old_count, count;
	struct qla_hw_data *ha = vha->hw;
	int need_reset = 0, peg_stuck = 1;

	need_reset = qla82xx_need_reset(ha);

	old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);

	/* Sample the PEG alive counter over ~2s; if it never moves the
	 * firmware processor is stuck. */
	for (i = 0; i < 10; i++) {
		timeout = msleep_interruptible(200);
		if (timeout) {
			/* Interrupted by a signal -- fail the device. */
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA82XX_DEV_FAILED);
			return QLA_FUNCTION_FAILED;
		}

		count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
		if (count != old_count)
			peg_stuck = 0;
	}

	if (need_reset) {
		/* We are trying to perform a recovery here. */
		if (peg_stuck)
			qla82xx_rom_lock_recovery(ha);
		goto dev_initialize;
	} else {
		/* Start of day for this ha context. */
		if (peg_stuck) {
			/* Either we are the first or recovery in progress. */
			qla82xx_rom_lock_recovery(ha);
			goto dev_initialize;
		} else
			/* Firmware already running. */
			goto dev_ready;
	}

	return rval;	/* not reached: both branches above jump away */

dev_initialize:
	/* set to DEV_INITIALIZING */
	qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
	qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);

	/* Driver that sets device state to initializating sets IDC version */
	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);

	/* Firmware start can sleep; drop the IDC lock around it. */
	qla82xx_idc_unlock(ha);
	rval = qla82xx_start_firmware(vha);
	qla82xx_idc_lock(ha);

	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
		qla82xx_clear_drv_active(ha);
		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
		return rval;
	}

dev_ready:
	qla_printk(KERN_INFO, ha, "HW State: READY\n");
	qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);

	return QLA_SUCCESS;
}

/*
 * qla82xx_need_qsnt_handler
 *    Code to start quiescence sequence
 *
 * Note:
 *      IDC lock must be held upon entry
 *
 * Return: void
 */
static void
qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t dev_state, drv_state, drv_active;
	unsigned long reset_timeout;

	if (vha->flags.online) {
		/*Block any further I/O and wait for pending cmnds to complete*/
		qla82xx_quiescent_state_cleanup(vha);
	}

	/* Set the quiescence ready bit */
	qla82xx_set_qsnt_ready(ha);

	/*wait for 30 secs for other functions to ack */
	reset_timeout = jiffies + (30 * HZ);

	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
	/* Its 2 that is written when qsnt is acked, moving one bit */
	drv_active = drv_active << 0x01;

	while (drv_state != drv_active) {

		if (time_after_eq(jiffies, reset_timeout)) {
			/* quiescence timeout, other functions didn't ack
			 * changing the state to DEV_READY
			 */
			qla_printk(KERN_INFO, ha,
			    "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME);
			qla_printk(KERN_INFO, ha,
			    "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active,
			    drv_state);
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA82XX_DEV_READY);
			qla_printk(KERN_INFO, ha,
			    "HW State: DEV_READY\n");
			/* Loop resync may sleep; release the IDC lock. */
			qla82xx_idc_unlock(ha);
			qla2x00_perform_loop_resync(vha);
			qla82xx_idc_lock(ha);

			qla82xx_clear_qsnt_ready(vha);
			return;
		}

		qla82xx_idc_unlock(ha);
		msleep(1000);
		qla82xx_idc_lock(ha);

		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
		drv_active = drv_active << 0x01;
	}
	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	/* everyone acked so set the state to DEV_QUIESCENCE */
	if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
		qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n");
		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
	}
}

/*
 * qla82xx_wait_for_state_change
 *    Wait for device state to change from given current state
 *
 * Note:
 *     IDC lock must not be held upon entry
 *
 * Return:
 *    Changed device state.
3411 */ 3412 uint32_t 3413 qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state) 3414 { 3415 struct qla_hw_data *ha = vha->hw; 3416 uint32_t dev_state; 3417 3418 do { 3419 msleep(1000); 3420 qla82xx_idc_lock(ha); 3421 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3422 qla82xx_idc_unlock(ha); 3423 } while (dev_state == curr_state); 3424 3425 return dev_state; 3426 } 3427 3428 static void 3429 qla82xx_dev_failed_handler(scsi_qla_host_t *vha) 3430 { 3431 struct qla_hw_data *ha = vha->hw; 3432 3433 /* Disable the board */ 3434 qla_printk(KERN_INFO, ha, "Disabling the board\n"); 3435 3436 qla82xx_idc_lock(ha); 3437 qla82xx_clear_drv_active(ha); 3438 qla82xx_idc_unlock(ha); 3439 3440 /* Set DEV_FAILED flag to disable timer */ 3441 vha->device_flags |= DFLG_DEV_FAILED; 3442 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 3443 qla2x00_mark_all_devices_lost(vha, 0); 3444 vha->flags.online = 0; 3445 vha->flags.init_done = 0; 3446 } 3447 3448 /* 3449 * qla82xx_need_reset_handler 3450 * Code to start reset sequence 3451 * 3452 * Note: 3453 * IDC lock must be held upon entry 3454 * 3455 * Return: 3456 * Success : 0 3457 * Failed : 1 3458 */ 3459 static void 3460 qla82xx_need_reset_handler(scsi_qla_host_t *vha) 3461 { 3462 uint32_t dev_state, drv_state, drv_active; 3463 unsigned long reset_timeout; 3464 struct qla_hw_data *ha = vha->hw; 3465 struct req_que *req = ha->req_q_map[0]; 3466 3467 if (vha->flags.online) { 3468 qla82xx_idc_unlock(ha); 3469 qla2x00_abort_isp_cleanup(vha); 3470 ha->isp_ops->get_flash_version(vha, req->ring); 3471 ha->isp_ops->nvram_config(vha); 3472 qla82xx_idc_lock(ha); 3473 } 3474 3475 qla82xx_set_rst_ready(ha); 3476 3477 /* wait for 10 seconds for reset ack from all functions */ 3478 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 3479 3480 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3481 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3482 3483 while (drv_state != drv_active) { 3484 if 
(time_after_eq(jiffies, reset_timeout)) { 3485 qla_printk(KERN_INFO, ha, 3486 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME); 3487 break; 3488 } 3489 qla82xx_idc_unlock(ha); 3490 msleep(1000); 3491 qla82xx_idc_lock(ha); 3492 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3493 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3494 } 3495 3496 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3497 qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state, 3498 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3499 3500 /* Force to DEV_COLD unless someone else is starting a reset */ 3501 if (dev_state != QLA82XX_DEV_INITIALIZING) { 3502 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); 3503 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3504 } 3505 } 3506 3507 int 3508 qla82xx_check_fw_alive(scsi_qla_host_t *vha) 3509 { 3510 uint32_t fw_heartbeat_counter; 3511 int status = 0; 3512 3513 fw_heartbeat_counter = qla82xx_rd_32(vha->hw, 3514 QLA82XX_PEG_ALIVE_COUNTER); 3515 /* all 0xff, assume AER/EEH in progress, ignore */ 3516 if (fw_heartbeat_counter == 0xffffffff) 3517 return status; 3518 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { 3519 vha->seconds_since_last_heartbeat++; 3520 /* FW not alive after 2 seconds */ 3521 if (vha->seconds_since_last_heartbeat == 2) { 3522 vha->seconds_since_last_heartbeat = 0; 3523 status = 1; 3524 } 3525 } else 3526 vha->seconds_since_last_heartbeat = 0; 3527 vha->fw_heartbeat_counter = fw_heartbeat_counter; 3528 return status; 3529 } 3530 3531 /* 3532 * qla82xx_device_state_handler 3533 * Main state handler 3534 * 3535 * Note: 3536 * IDC lock must be held upon entry 3537 * 3538 * Return: 3539 * Success : 0 3540 * Failed : 1 3541 */ 3542 int 3543 qla82xx_device_state_handler(scsi_qla_host_t *vha) 3544 { 3545 uint32_t dev_state; 3546 int rval = QLA_SUCCESS; 3547 unsigned long dev_init_timeout; 3548 struct qla_hw_data *ha = vha->hw; 3549 3550 qla82xx_idc_lock(ha); 3551 if 
(!vha->flags.init_done) 3552 qla82xx_set_drv_active(vha); 3553 3554 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3555 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, 3556 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3557 3558 /* wait for 30 seconds for device to go ready */ 3559 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 3560 3561 while (1) { 3562 3563 if (time_after_eq(jiffies, dev_init_timeout)) { 3564 DEBUG(qla_printk(KERN_INFO, ha, 3565 "%s: device init failed!\n", 3566 QLA2XXX_DRIVER_NAME)); 3567 rval = QLA_FUNCTION_FAILED; 3568 break; 3569 } 3570 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3571 qla_printk(KERN_INFO, ha, 3572 "2:Device state is 0x%x = %s\n", dev_state, 3573 dev_state < MAX_STATES ? 3574 qdev_state[dev_state] : "Unknown"); 3575 3576 switch (dev_state) { 3577 case QLA82XX_DEV_READY: 3578 goto exit; 3579 case QLA82XX_DEV_COLD: 3580 rval = qla82xx_device_bootstrap(vha); 3581 goto exit; 3582 case QLA82XX_DEV_INITIALIZING: 3583 qla82xx_idc_unlock(ha); 3584 msleep(1000); 3585 qla82xx_idc_lock(ha); 3586 break; 3587 case QLA82XX_DEV_NEED_RESET: 3588 qla82xx_need_reset_handler(vha); 3589 dev_init_timeout = jiffies + 3590 (ha->nx_dev_init_timeout * HZ); 3591 break; 3592 case QLA82XX_DEV_NEED_QUIESCENT: 3593 qla82xx_need_qsnt_handler(vha); 3594 /* Reset timeout value after quiescence handler */ 3595 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\ 3596 * HZ); 3597 break; 3598 case QLA82XX_DEV_QUIESCENT: 3599 /* Owner will exit and other will wait for the state 3600 * to get changed 3601 */ 3602 if (ha->flags.quiesce_owner) 3603 goto exit; 3604 3605 qla82xx_idc_unlock(ha); 3606 msleep(1000); 3607 qla82xx_idc_lock(ha); 3608 3609 /* Reset timeout value after quiescence handler */ 3610 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\ 3611 * HZ); 3612 break; 3613 case QLA82XX_DEV_FAILED: 3614 qla82xx_dev_failed_handler(vha); 3615 rval = QLA_FUNCTION_FAILED; 3616 goto exit; 
3617 default: 3618 qla82xx_idc_unlock(ha); 3619 msleep(1000); 3620 qla82xx_idc_lock(ha); 3621 } 3622 } 3623 exit: 3624 qla82xx_idc_unlock(ha); 3625 return rval; 3626 } 3627 3628 void qla82xx_watchdog(scsi_qla_host_t *vha) 3629 { 3630 uint32_t dev_state, halt_status; 3631 struct qla_hw_data *ha = vha->hw; 3632 3633 /* don't poll if reset is going on */ 3634 if (!ha->flags.isp82xx_reset_hdlr_active) { 3635 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3636 if (dev_state == QLA82XX_DEV_NEED_RESET && 3637 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3638 qla_printk(KERN_WARNING, ha, 3639 "%s(): Adapter reset needed!\n", __func__); 3640 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3641 qla2xxx_wake_dpc(vha); 3642 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3643 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 3644 DEBUG(qla_printk(KERN_INFO, ha, 3645 "scsi(%ld) %s - detected quiescence needed\n", 3646 vha->host_no, __func__)); 3647 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3648 qla2xxx_wake_dpc(vha); 3649 } else { 3650 if (qla82xx_check_fw_alive(vha)) { 3651 halt_status = qla82xx_rd_32(ha, 3652 QLA82XX_PEG_HALT_STATUS1); 3653 if (halt_status & HALT_STATUS_UNRECOVERABLE) { 3654 set_bit(ISP_UNRECOVERABLE, 3655 &vha->dpc_flags); 3656 } else { 3657 qla_printk(KERN_INFO, ha, 3658 "scsi(%ld): %s - detect abort needed\n", 3659 vha->host_no, __func__); 3660 set_bit(ISP_ABORT_NEEDED, 3661 &vha->dpc_flags); 3662 } 3663 qla2xxx_wake_dpc(vha); 3664 ha->flags.isp82xx_fw_hung = 1; 3665 if (ha->flags.mbox_busy) { 3666 ha->flags.mbox_int = 1; 3667 DEBUG2(qla_printk(KERN_ERR, ha, 3668 "Due to fw hung, doing premature " 3669 "completion of mbx command\n")); 3670 if (test_bit(MBX_INTR_WAIT, 3671 &ha->mbx_cmd_flags)) 3672 complete(&ha->mbx_intr_comp); 3673 } 3674 } 3675 } 3676 } 3677 } 3678 3679 int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3680 { 3681 int rval; 3682 rval = qla82xx_device_state_handler(vha); 3683 return rval; 3684 } 3685 3686 /* 
3687 * qla82xx_abort_isp 3688 * Resets ISP and aborts all outstanding commands. 3689 * 3690 * Input: 3691 * ha = adapter block pointer. 3692 * 3693 * Returns: 3694 * 0 = success 3695 */ 3696 int 3697 qla82xx_abort_isp(scsi_qla_host_t *vha) 3698 { 3699 int rval; 3700 struct qla_hw_data *ha = vha->hw; 3701 uint32_t dev_state; 3702 3703 if (vha->device_flags & DFLG_DEV_FAILED) { 3704 qla_printk(KERN_WARNING, ha, 3705 "%s(%ld): Device in failed state, " 3706 "Exiting.\n", __func__, vha->host_no); 3707 return QLA_SUCCESS; 3708 } 3709 ha->flags.isp82xx_reset_hdlr_active = 1; 3710 3711 qla82xx_idc_lock(ha); 3712 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3713 if (dev_state == QLA82XX_DEV_READY) { 3714 qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); 3715 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3716 QLA82XX_DEV_NEED_RESET); 3717 } else 3718 qla_printk(KERN_INFO, ha, "HW State: %s\n", 3719 dev_state < MAX_STATES ? 3720 qdev_state[dev_state] : "Unknown"); 3721 qla82xx_idc_unlock(ha); 3722 3723 rval = qla82xx_device_state_handler(vha); 3724 3725 qla82xx_idc_lock(ha); 3726 qla82xx_clear_rst_ready(ha); 3727 qla82xx_idc_unlock(ha); 3728 3729 if (rval == QLA_SUCCESS) { 3730 ha->flags.isp82xx_fw_hung = 0; 3731 ha->flags.isp82xx_reset_hdlr_active = 0; 3732 qla82xx_restart_isp(vha); 3733 } 3734 3735 if (rval) { 3736 vha->flags.online = 1; 3737 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 3738 if (ha->isp_abort_cnt == 0) { 3739 qla_printk(KERN_WARNING, ha, 3740 "ISP error recovery failed - " 3741 "board disabled\n"); 3742 /* 3743 * The next call disables the board 3744 * completely. 
3745 */ 3746 ha->isp_ops->reset_adapter(vha); 3747 vha->flags.online = 0; 3748 clear_bit(ISP_ABORT_RETRY, 3749 &vha->dpc_flags); 3750 rval = QLA_SUCCESS; 3751 } else { /* schedule another ISP abort */ 3752 ha->isp_abort_cnt--; 3753 DEBUG(qla_printk(KERN_INFO, ha, 3754 "qla%ld: ISP abort - retry remaining %d\n", 3755 vha->host_no, ha->isp_abort_cnt)); 3756 rval = QLA_FUNCTION_FAILED; 3757 } 3758 } else { 3759 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3760 DEBUG(qla_printk(KERN_INFO, ha, 3761 "(%ld): ISP error recovery - retrying (%d) " 3762 "more times\n", vha->host_no, ha->isp_abort_cnt)); 3763 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3764 rval = QLA_FUNCTION_FAILED; 3765 } 3766 } 3767 return rval; 3768 } 3769 3770 /* 3771 * qla82xx_fcoe_ctx_reset 3772 * Perform a quick reset and aborts all outstanding commands. 3773 * This will only perform an FCoE context reset and avoids a full blown 3774 * chip reset. 3775 * 3776 * Input: 3777 * ha = adapter block pointer. 3778 * is_reset_path = flag for identifying the reset path. 3779 * 3780 * Returns: 3781 * 0 = success 3782 */ 3783 int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha) 3784 { 3785 int rval = QLA_FUNCTION_FAILED; 3786 3787 if (vha->flags.online) { 3788 /* Abort all outstanding commands, so as to be requeued later */ 3789 qla2x00_abort_isp_cleanup(vha); 3790 } 3791 3792 /* Stop currently executing firmware. 3793 * This will destroy existing FCoE context at the F/W end. 3794 */ 3795 qla2x00_try_to_stop_firmware(vha); 3796 3797 /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */ 3798 rval = qla82xx_restart_isp(vha); 3799 3800 return rval; 3801 } 3802 3803 /* 3804 * qla2x00_wait_for_fcoe_ctx_reset 3805 * Wait till the FCoE context is reset. 3806 * 3807 * Note: 3808 * Does context switching here. 3809 * Release SPIN_LOCK (if any) before calling this routine. 
3810 * 3811 * Return: 3812 * Success (fcoe_ctx reset is done) : 0 3813 * Failed (fcoe_ctx reset not completed within max loop timout ) : 1 3814 */ 3815 int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) 3816 { 3817 int status = QLA_FUNCTION_FAILED; 3818 unsigned long wait_reset; 3819 3820 wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); 3821 while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 3822 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 3823 && time_before(jiffies, wait_reset)) { 3824 3825 set_current_state(TASK_UNINTERRUPTIBLE); 3826 schedule_timeout(HZ); 3827 3828 if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) && 3829 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 3830 status = QLA_SUCCESS; 3831 break; 3832 } 3833 } 3834 DEBUG2(printk(KERN_INFO 3835 "%s status=%d\n", __func__, status)); 3836 3837 return status; 3838 } 3839 3840 void 3841 qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) 3842 { 3843 int i; 3844 unsigned long flags; 3845 struct qla_hw_data *ha = vha->hw; 3846 3847 /* Check if 82XX firmware is alive or not 3848 * We may have arrived here from NEED_RESET 3849 * detection only 3850 */ 3851 if (!ha->flags.isp82xx_fw_hung) { 3852 for (i = 0; i < 2; i++) { 3853 msleep(1000); 3854 if (qla82xx_check_fw_alive(vha)) { 3855 ha->flags.isp82xx_fw_hung = 1; 3856 if (ha->flags.mbox_busy) { 3857 ha->flags.mbox_int = 1; 3858 complete(&ha->mbx_intr_comp); 3859 } 3860 break; 3861 } 3862 } 3863 } 3864 3865 /* Abort all commands gracefully if fw NOT hung */ 3866 if (!ha->flags.isp82xx_fw_hung) { 3867 int cnt, que; 3868 srb_t *sp; 3869 struct req_que *req; 3870 3871 spin_lock_irqsave(&ha->hardware_lock, flags); 3872 for (que = 0; que < ha->max_req_queues; que++) { 3873 req = ha->req_q_map[que]; 3874 if (!req) 3875 continue; 3876 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 3877 sp = req->outstanding_cmds[cnt]; 3878 if (sp) { 3879 if (!sp->ctx || 3880 (sp->flags & SRB_FCP_CMND_DMA_VALID)) { 3881 spin_unlock_irqrestore( 3882 
&ha->hardware_lock, flags); 3883 if (ha->isp_ops->abort_command(sp)) { 3884 qla_printk(KERN_INFO, ha, 3885 "scsi(%ld): mbx abort command failed in %s\n", 3886 vha->host_no, __func__); 3887 } else { 3888 qla_printk(KERN_INFO, ha, 3889 "scsi(%ld): mbx abort command success in %s\n", 3890 vha->host_no, __func__); 3891 } 3892 spin_lock_irqsave(&ha->hardware_lock, flags); 3893 } 3894 } 3895 } 3896 } 3897 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3898 3899 /* Wait for pending cmds (physical and virtual) to complete */ 3900 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, 3901 WAIT_HOST) == QLA_SUCCESS) { 3902 DEBUG2(qla_printk(KERN_INFO, ha, 3903 "Done wait for pending commands\n")); 3904 } 3905 } 3906 } 3907