/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

/*
 * Layout of the shared ioc_fail_sync register: the low 16 bits hold
 * per-PCI-fn "sync acked" flags, the high 16 bits the corresponding
 * "sync required" flags. Each function owns bit (1 << pcifn) in each half.
 */
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH	16
#define bfa_ioc_ct_get_sync_ackd(__val)		(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);

/* hwif vtables filled in by bfa_ioc_set_ct_hwif()/bfa_ioc_set_ct2_hwif() */
static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;

/*
 * Return true if firmware of current driver matches the running
firmware. 52 */ 53 static bfa_boolean_t 54 bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) 55 { 56 enum bfi_ioc_state ioc_fwstate; 57 u32 usecnt; 58 struct bfi_ioc_image_hdr_s fwhdr; 59 60 /* 61 * Firmware match check is relevant only for CNA. 62 */ 63 if (!bfa_ioc_is_cna(ioc)) 64 return BFA_TRUE; 65 66 /* 67 * If bios boot (flash based) -- do not increment usage count 68 */ 69 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < 70 BFA_IOC_FWIMG_MINSZ) 71 return BFA_TRUE; 72 73 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 74 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 75 76 /* 77 * If usage count is 0, always return TRUE. 78 */ 79 if (usecnt == 0) { 80 writel(1, ioc->ioc_regs.ioc_usage_reg); 81 writel(1, ioc->ioc_regs.ioc_usage_sem_reg); 82 writel(0, ioc->ioc_regs.ioc_fail_sync); 83 bfa_trc(ioc, usecnt); 84 return BFA_TRUE; 85 } 86 87 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 88 bfa_trc(ioc, ioc_fwstate); 89 90 /* 91 * Use count cannot be non-zero and chip in uninitialized state. 92 */ 93 WARN_ON(ioc_fwstate == BFI_IOC_UNINIT); 94 95 /* 96 * Check if another driver with a different firmware is active 97 */ 98 bfa_ioc_fwver_get(ioc, &fwhdr); 99 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { 100 writel(1, ioc->ioc_regs.ioc_usage_sem_reg); 101 bfa_trc(ioc, usecnt); 102 return BFA_FALSE; 103 } 104 105 /* 106 * Same firmware version. Increment the reference count. 107 */ 108 usecnt++; 109 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 110 writel(1, ioc->ioc_regs.ioc_usage_sem_reg); 111 bfa_trc(ioc, usecnt); 112 return BFA_TRUE; 113 } 114 115 static void 116 bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) 117 { 118 u32 usecnt; 119 120 /* 121 * Firmware lock is relevant only for CNA. 
122 */ 123 if (!bfa_ioc_is_cna(ioc)) 124 return; 125 126 /* 127 * If bios boot (flash based) -- do not decrement usage count 128 */ 129 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < 130 BFA_IOC_FWIMG_MINSZ) 131 return; 132 133 /* 134 * decrement usage count 135 */ 136 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 137 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 138 WARN_ON(usecnt <= 0); 139 140 usecnt--; 141 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 142 bfa_trc(ioc, usecnt); 143 144 writel(1, ioc->ioc_regs.ioc_usage_sem_reg); 145 } 146 147 /* 148 * Notify other functions on HB failure. 149 */ 150 static void 151 bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc) 152 { 153 if (bfa_ioc_is_cna(ioc)) { 154 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); 155 writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); 156 /* Wait for halt to take effect */ 157 readl(ioc->ioc_regs.ll_halt); 158 readl(ioc->ioc_regs.alt_ll_halt); 159 } else { 160 writel(~0U, ioc->ioc_regs.err_set); 161 readl(ioc->ioc_regs.err_set); 162 } 163 } 164 165 /* 166 * Host to LPU mailbox message addresses 167 */ 168 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = { 169 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, 170 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, 171 { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, 172 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } 173 }; 174 175 /* 176 * Host <-> LPU mailbox command/status registers - port 0 177 */ 178 static struct { u32 hfn, lpu; } ct_p0reg[] = { 179 { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, 180 { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, 181 { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, 182 { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } 183 }; 184 185 /* 186 * Host <-> LPU mailbox command/status registers - port 1 187 */ 188 static struct { u32 hfn, lpu; } ct_p1reg[] = { 189 { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, 190 { 
HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, 191 { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, 192 { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } 193 }; 194 195 static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu; } ct2_reg[] = { 196 { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, 197 CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT }, 198 { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, 199 CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT }, 200 }; 201 202 static void 203 bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) 204 { 205 void __iomem *rb; 206 int pcifn = bfa_ioc_pcifn(ioc); 207 208 rb = bfa_ioc_bar0(ioc); 209 210 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; 211 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; 212 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; 213 214 if (ioc->port_id == 0) { 215 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; 216 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; 217 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; 218 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; 219 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; 220 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; 221 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; 222 } else { 223 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); 224 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 225 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; 226 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; 227 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; 228 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; 229 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; 230 } 231 232 /* 233 * PSS control registers 234 */ 235 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); 236 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); 237 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG); 238 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + 
APP_PLL_SCLK_CTL_REG); 239 240 /* 241 * IOC semaphore registers and serialization 242 */ 243 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); 244 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); 245 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 246 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 247 ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC); 248 249 /* 250 * sram memory access 251 */ 252 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); 253 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; 254 255 /* 256 * err set reg : for notification of hb failure in fcmode 257 */ 258 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 259 } 260 261 static void 262 bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc) 263 { 264 void __iomem *rb; 265 int port = bfa_ioc_portid(ioc); 266 267 rb = bfa_ioc_bar0(ioc); 268 269 ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; 270 ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; 271 ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; 272 ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; 273 ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; 274 275 if (port == 0) { 276 ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; 277 ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; 278 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; 279 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; 280 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; 281 } else { 282 ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG); 283 ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG); 284 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; 285 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; 286 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; 287 } 288 289 /* 290 * PSS control registers 291 */ 292 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); 293 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); 294 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG); 
295 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG); 296 297 /* 298 * IOC semaphore registers and serialization 299 */ 300 ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG); 301 ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG); 302 ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG); 303 ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT); 304 ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC); 305 306 /* 307 * sram memory access 308 */ 309 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); 310 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; 311 312 /* 313 * err set reg : for notification of hb failure in fcmode 314 */ 315 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 316 } 317 318 /* 319 * Initialize IOC to port mapping. 320 */ 321 322 #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) 323 static void 324 bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) 325 { 326 void __iomem *rb = ioc->pcidev.pci_bar_kva; 327 u32 r32; 328 329 /* 330 * For catapult, base port id on personality register and IOC type 331 */ 332 r32 = readl(rb + FNC_PERS_REG); 333 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); 334 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; 335 336 bfa_trc(ioc, bfa_ioc_pcifn(ioc)); 337 bfa_trc(ioc, ioc->port_id); 338 } 339 340 static void 341 bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc) 342 { 343 ioc->port_id = bfa_ioc_pcifn(ioc) % 2; 344 345 bfa_trc(ioc, bfa_ioc_pcifn(ioc)); 346 bfa_trc(ioc, ioc->port_id); 347 } 348 349 /* 350 * Set interrupt mode for a function: INTX or MSIX 351 */ 352 static void 353 bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) 354 { 355 void __iomem *rb = ioc->pcidev.pci_bar_kva; 356 u32 r32, mode; 357 358 r32 = readl(rb + FNC_PERS_REG); 359 bfa_trc(ioc, r32); 360 361 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & 362 __F0_INTX_STATUS; 363 364 /* 365 * If already in desired mode, do not change anything 366 */ 367 if ((!msix && mode) || (msix && !mode)) 
368 return; 369 370 if (msix) 371 mode = __F0_INTX_STATUS_MSIX; 372 else 373 mode = __F0_INTX_STATUS_INTA; 374 375 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); 376 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); 377 bfa_trc(ioc, r32); 378 379 writel(r32, rb + FNC_PERS_REG); 380 } 381 382 /* 383 * Cleanup hw semaphore and usecnt registers 384 */ 385 static void 386 bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) 387 { 388 389 if (bfa_ioc_is_cna(ioc)) { 390 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 391 writel(0, ioc->ioc_regs.ioc_usage_reg); 392 writel(1, ioc->ioc_regs.ioc_usage_sem_reg); 393 } 394 395 /* 396 * Read the hw sem reg to make sure that it is locked 397 * before we clear it. If it is not locked, writing 1 398 * will lock it instead of clearing it. 399 */ 400 readl(ioc->ioc_regs.ioc_sem_reg); 401 writel(1, ioc->ioc_regs.ioc_sem_reg); 402 } 403 404 static bfa_boolean_t 405 bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc) 406 { 407 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); 408 uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); 409 410 /* 411 * Driver load time. If the sync required bit for this PCI fn 412 * is set, it is due to an unclean exit by the driver for this 413 * PCI fn in the previous incarnation. Whoever comes here first 414 * should clean it up, no matter which PCI fn. 
415 */ 416 417 if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { 418 writel(0, ioc->ioc_regs.ioc_fail_sync); 419 writel(1, ioc->ioc_regs.ioc_usage_reg); 420 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); 421 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); 422 return BFA_TRUE; 423 } 424 425 return bfa_ioc_ct_sync_complete(ioc); 426 } 427 428 /* 429 * Synchronized IOC failure processing routines 430 */ 431 static void 432 bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc) 433 { 434 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); 435 uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); 436 437 writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); 438 } 439 440 static void 441 bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc) 442 { 443 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); 444 uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | 445 bfa_ioc_ct_sync_pos(ioc); 446 447 writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); 448 } 449 450 static void 451 bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc) 452 { 453 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); 454 455 writel((r32 | bfa_ioc_ct_sync_pos(ioc)), 456 ioc->ioc_regs.ioc_fail_sync); 457 } 458 459 static bfa_boolean_t 460 bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc) 461 { 462 uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); 463 uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); 464 uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); 465 uint32_t tmp_ackd; 466 467 if (sync_ackd == 0) 468 return BFA_TRUE; 469 470 /* 471 * The check below is to see whether any other PCI fn 472 * has reinitialized the ASIC (reset sync_ackd bits) 473 * and failed again while this IOC was waiting for hw 474 * semaphore (in bfa_iocpf_sm_semwait()). 
475 */ 476 tmp_ackd = sync_ackd; 477 if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && 478 !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) 479 sync_ackd |= bfa_ioc_ct_sync_pos(ioc); 480 481 if (sync_reqd == sync_ackd) { 482 writel(bfa_ioc_ct_clear_sync_ackd(r32), 483 ioc->ioc_regs.ioc_fail_sync); 484 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 485 writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); 486 return BFA_TRUE; 487 } 488 489 /* 490 * If another PCI fn reinitialized and failed again while 491 * this IOC was waiting for hw sem, the sync_ackd bit for 492 * this IOC need to be set again to allow reinitialization. 493 */ 494 if (tmp_ackd != sync_ackd) 495 writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); 496 497 return BFA_FALSE; 498 } 499 500 /** 501 * Called from bfa_ioc_attach() to map asic specific calls. 502 */ 503 static void 504 bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif) 505 { 506 hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock; 507 hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock; 508 hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail; 509 hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 510 hwif->ioc_sync_start = bfa_ioc_ct_sync_start; 511 hwif->ioc_sync_join = bfa_ioc_ct_sync_join; 512 hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave; 513 hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack; 514 hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete; 515 } 516 517 /** 518 * Called from bfa_ioc_attach() to map asic specific calls. 519 */ 520 void 521 bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) 522 { 523 bfa_ioc_set_ctx_hwif(ioc, &hwif_ct); 524 525 hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init; 526 hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; 527 hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; 528 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 529 ioc->ioc_hwif = &hwif_ct; 530 } 531 532 /** 533 * Called from bfa_ioc_attach() to map asic specific calls. 
 */
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
	/* isr_mode_set is intentionally absent on CT2 */
	hwif_ct2.ioc_isr_mode_set = NULL;
	ioc->ioc_hwif = &hwif_ct2;
}

/*
 * Temporary workaround for MSI-X resource allocation for catapult-2.
 */
#define HOSTFN_MSIX_DEFAULT		16
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	/* if a vector count is already programmed, leave it alone */
	if (r32 & __MSIX_VT_NUMVT__MK)
		return;

	/* program NUMVT (count - 1) and the per-fn vector offset */
	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
}

/*
 * Catapult PLL initialization: program s_clk/l_clk PLLs, reset LMEM,
 * and run the eDRAM memory BIST. Register write order is a hardware
 * bring-up sequence -- do not reorder.
 */
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32	pll_sclk, pll_fclk, r32;
	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
			__APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	/* mask and clear host interrupts on both functions */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
	/* flush the posted writes before the settle delay */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	/* release logic soft reset, leave PLLs enabled */
	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	/* take local memory out of reset */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* run memory BIST; status is read (traced via r32) but not acted on */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}

/* Per-mode CT2 PLL settings, indexed by enum bfi_asic_mode */
static struct { u32 sclk, speed, half_speed; } ct2_pll[] = {
	{0},	/* unused */
	{__APP_PLL_SCLK_CLK_DIV2, 0, 0},	/* FC 8G */
	{0, 0, 0},	/* FC 16G */
	{__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2, 0,	/* ETH */
		__APP_LPUCLK_HALFSPEED},
	{0, 0, 0},	/* COMBO */
};

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * select clock speed based on mode
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | ct2_pll[mode].sclk, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init dont clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value (0x1061731b is a hardware-prescribed PLL setting)
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * release soft reset on s_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * clock gating for ethernet subsystem if not in ethernet mode
	 */
	if (mode != BFI_ASIC_MODE_ETH) {
		r32 = readl((rb + CT2_CHIP_MISC_PRG));
		writel(r32 & ~__ETH_CLK_ENABLE_PORT0,
			(rb + CT2_CHIP_MISC_PRG));

		r32 = readl((rb + CT2_PCIE_MISC_REG));
		writel(r32 & ~__ETH_CLK_ENABLE_PORT1,
			(rb + CT2_PCIE_MISC_REG));
	}
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (based on mode)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32 | ct2_pll[mode].speed,
		(rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (based on mode)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 | ct2_pll[mode].half_speed,
		(rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (magic values are hardware-prescribed)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	if (mode == BFI_ASIC_MODE_FC || mode == BFI_ASIC_MODE_FC16 ||
	    mode == BFI_ASIC_MODE_ETH)
		r32 |= 0x20c1731b;
	else
		r32 |= 0x2081731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * release soft reset on l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_LCLK_CTL_REG));
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	bfa_boolean_t fcmode;
	u32	r32;

	fcmode = (mode == BFI_ASIC_MODE_FC) || (mode == BFI_ASIC_MODE_FC16);
	if (!fcmode) {
		/* non-FC modes: pulse the 1T memory reset on both ports */
		writel(__PMM_1T_PNDB_P | __PMM_1T_RESET_P,
			(rb + CT2_PMM_1T_CONTROL_REG_P0));
		writel(__PMM_1T_PNDB_P | __PMM_1T_RESET_P,
			(rb + CT2_PMM_1T_CONTROL_REG_P1));
	}

	/* take local memory out of reset */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	if (!fcmode) {
		writel(__PMM_1T_PNDB_P, (rb + CT2_PMM_1T_CONTROL_REG_P0));
		writel(__PMM_1T_PNDB_P, (rb + CT2_PMM_1T_CONTROL_REG_P1));
	}

	/* run memory BIST; note the result register is not checked here */
	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}

/*
 * Catapult-2 PLL initialization: s_clk, l_clk, then memory init,
 * followed by GPIO setup and marking both IOCs uninitialized.
 */
bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	bfa_ioc_ct2_sclk_init(rb, mode);
	bfa_ioc_ct2_lclk_init(rb, mode);
	bfa_ioc_ct2_mem_init(rb, mode);

	/*
	 * Disable flash presence to NFC by clearing GPIO 0
	 */
	writel(0, (rb + PSS_GPIO_OUT_REG));
	writel(1, (rb + PSS_GPIO_OE_REG));

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}