/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

#define bfa_ioc_ct_sync_pos(__ioc)	\
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH	16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);

static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;

/*
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * The use count cannot be non-zero while the chip is still in
	 * the uninitialized state.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}
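/*
 * A note on the hardware semaphore idiom used throughout this file:
 * bfa_ioc_sem_get() (bfa_ioc.c) waits until the semaphore register is
 * acquired, and the readl()/writel(1, ...) pair releases it; the read
 * confirms the semaphore is still held before the write-1 clears it
 * (see the comment in bfa_ioc_ct_ownership_reset() below). The usage
 * count in ioc_usage_reg is plain scratch storage, so every access to
 * it is bracketed by ioc_usage_sem_reg.
 */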
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/*
	 * decrement usage count
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	WARN_ON(usecnt == 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_trc(ioc, usecnt);

	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}

/*
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_is_cna(ioc)) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(~0U, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}

/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
	ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT },
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT },
};

static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
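/*
 * Note the layout difference captured by the tables above: CT spreads
 * the mailbox/page-number registers per PCI function (ct_fnreg) and
 * the command/status registers per port (ct_p0reg/ct_p1reg), while
 * CT2 keeps everything in a single per-port table (ct2_reg) and adds
 * an LPU read-status register that CT does not have.
 */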
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/*
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

static void
bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
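/*
 * FNC_PERS_REG packs an 8-bit personality field per PCI function
 * (FNC_PERS_FN_SHIFT is __fn * 8), so e.g. function 2 lands in bits
 * 16..23 and the port-map subfield is extracted with
 * __F0_PORT_MAP_MK/__F0_PORT_MAP_SH after the shift. CT2 exposes a
 * per-function personality register (CT2_HOSTFN_PERSONALITY0)
 * instead, so bfa_ioc_ct2_map_port() needs no function-based shift.
 */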
/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}

bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return BFA_TRUE;
	}

	return BFA_FALSE;
}

/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);

	writel(0, ioc->ioc_regs.ioc_fail_sync);
	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
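/*
 * The ioc_fail_sync word used above and below is two 16-bit bitmaps
 * (see the accessor macros at the top of this file): the low half
 * holds per-PCI-fn "sync acked" bits and the high half the matching
 * "sync required" bits, one bit per function (1 << pcifn). The
 * routines below set and clear these bits to coordinate failure
 * recovery among all functions sharing the ASIC.
 */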
/*
 * Synchronized IOC failure processing routines
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
			bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
		ioc->ioc_regs.ioc_fail_sync);
}

static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
{
	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
}
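/*
 * The handlers installed above are common to CT and CT2; the two
 * attach-time routines below add only the ASIC-specific entries
 * (PLL init, register init, port mapping, interrupt mode/read-stat
 * handling) before publishing the hwif through ioc->ioc_hwif.
 */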
/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);

	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	ioc->ioc_hwif = &hwif_ct;
}

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
	hwif_ct2.ioc_isr_mode_set = NULL;
	ioc->ioc_hwif = &hwif_ct2;
}

/*
 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
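/*
 * In bfa_ioc_ct2_poweron() above, the NUMVT field (bits 11..21 per the
 * masks) is presumably programmed as "vector count - 1", hence
 * HOSTFN_MSIX_DEFAULT - 1 = 63 for a 64-vector window starting at
 * 64 * pcifn (the low 11 OFST bits). If NUMVT was already set up,
 * e.g. by BIOS/EFI, only the mailbox-error vector index is rewritten
 * from the existing offset.
 */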
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 pll_sclk, pll_fclk, r32;
	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
			__APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init don't clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}
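/*
 * The CT2 helpers below are composed by bfa_ioc_ct2_pll_init() at the
 * end of this file: clock reset (sclk + lclk init followed by release
 * of the PLL logic soft resets), MAC reset, flash enable via GPIO,
 * and NFC controller halt/resume, in an order that depends on how the
 * part powered up.
 */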
void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(0));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(1));
}

static void
bfa_ioc_ct2_enable_flash(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_GPIO_OUT_REG));
	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
	r32 = readl((rb + PSS_GPIO_OE_REG));
	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
}

#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_PAUSE_MAX_DELAY	4000
#define CT2_NFC_VER_VALID	0x147
#define CT2_NFC_STATE_RUNNING	0x20000001
#define BFA_IOC_PLL_POLL	1000000

static bfa_boolean_t
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	u32 r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return BFA_TRUE;

	return BFA_FALSE;
}

static void
bfa_ioc_ct2_nfc_halt(void __iomem *rb)
{
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			break;
		udelay(1000);
	}
	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	u32 r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	WARN_ON(1);
}

static void
bfa_ioc_ct2_clk_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_SCLK_CTL_REG));

	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_LCLK_CTL_REG));
}

static void
bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
{
	u32 r32, i;

	r32 = readl((rb + PSS_CTL_REG));
	r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
	writel(r32, (rb + PSS_CTL_REG));

	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);

	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
	WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
}

static void
bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
{
	u32 r32;
	int i;

	if (bfa_ioc_ct2_nfc_halted(rb))
		bfa_ioc_ct2_nfc_resume(rb);
	for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_STS_REG);
		if (r32 == CT2_NFC_STATE_RUNNING)
			return;
		udelay(1000);
	}

	r32 = readl(rb + CT2_NFC_STS_REG);
	WARN_ON(r32 != CT2_NFC_STATE_RUNNING);
}
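/*
 * CT2 PLL init. The WGN status read below selects the recovery path:
 * __WGN_READY | __GLBL_PF_VF_CFG_RDY indicates a corrupted flash (per
 * the comment in that branch), so clocks are reset and the flash is
 * force-enabled; with a recent NFC (version >= CT2_NFC_VER_VALID)
 * loaded over AHB, the NFC is left running and performs the PLL reset
 * itself; otherwise the NFC is halted and the host resets clocks and
 * MAC directly.
 */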
bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 wgn, r32, nfc_ver;

	wgn = readl(rb + CT2_WGN_STATUS);

	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		/*
		 * If flash is corrupted, enable flash explicitly
		 */
		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);

		bfa_ioc_ct2_mac_reset(rb);

		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);

	} else {
		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

		if ((nfc_ver >= CT2_NFC_VER_VALID) &&
		    (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {

			bfa_ioc_ct2_wait_till_nfc_running(rb);

			bfa_ioc_ct2_nfc_clk_reset(rb);
		} else {
			bfa_ioc_ct2_nfc_halt(rb);

			bfa_ioc_ct2_clk_reset(rb);
			bfa_ioc_ct2_mac_reset(rb);
			bfa_ioc_ct2_clk_reset(rb);
		}
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));

	return BFA_STATUS_OK;
}