/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

#define bfa_ioc_ct_sync_pos(__ioc)          BIT(bfa_ioc_pcifn(__ioc))
#define BFA_IOC_SYNC_REQD_SH                16
#define bfa_ioc_ct_get_sync_ackd(__val)     (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)   (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)     (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
                (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
                struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
                struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
                enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
                enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

static const struct bfa_ioc_hwif nw_hwif_ct = {
        .ioc_pll_init = bfa_ioc_ct_pll_init,
        .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
        .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
        .ioc_reg_init = bfa_ioc_ct_reg_init,
        .ioc_map_port = bfa_ioc_ct_map_port,
        .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
        .ioc_notify_fail = bfa_ioc_ct_notify_fail,
        .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
        .ioc_sync_start = bfa_ioc_ct_sync_start,
        .ioc_sync_join = bfa_ioc_ct_sync_join,
        .ioc_sync_leave = bfa_ioc_ct_sync_leave,
        .ioc_sync_ack = bfa_ioc_ct_sync_ack,
        .ioc_sync_complete = bfa_ioc_ct_sync_complete,
        .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
        .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
        .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
        .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
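/*
 * The CT2 ops table below differs from the CT table above in a few
 * entries: pll_init, reg_init and map_port have CT2-specific
 * implementations, ioc_lpu_read_stat is provided only for CT2, and
 * ioc_isr_mode_set is NULL because no per-function INTX/MSI-X
 * switching is done through this hook on CT2.
 */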
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
        .ioc_pll_init = bfa_ioc_ct2_pll_init,
        .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
        .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
        .ioc_reg_init = bfa_ioc_ct2_reg_init,
        .ioc_map_port = bfa_ioc_ct2_map_port,
        .ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat,
        .ioc_isr_mode_set = NULL,
        .ioc_notify_fail = bfa_ioc_ct_notify_fail,
        .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
        .ioc_sync_start = bfa_ioc_ct_sync_start,
        .ioc_sync_join = bfa_ioc_ct_sync_join,
        .ioc_sync_leave = bfa_ioc_ct_sync_leave,
        .ioc_sync_ack = bfa_ioc_ct_sync_ack,
        .ioc_sync_complete = bfa_ioc_ct_sync_complete,
        .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
        .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
        .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
        .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};

/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
        ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
        ioc->ioc_hwif = &nw_hwif_ct2;
}

/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32 usecnt;
        struct bfi_ioc_image_hdr fwhdr;

        /*
         * If bios boot (flash based) -- do not increment usage count
         */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
            BFA_IOC_FWIMG_MINSZ)
                return true;

        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

        /*
         * If usage count is 0, always return TRUE.
         */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                return true;
        }

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        /*
         * Use count cannot be non-zero while the chip is uninitialized.
         */
        BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

        /*
         * Check if another driver with a different firmware is active
         */
        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                return false;
        }

        /*
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        return true;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
        u32 usecnt;

        /*
         * If bios boot (flash based) -- do not decrement usage count
         */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
            BFA_IOC_FWIMG_MINSZ)
                return;

        /*
         * decrement usage count
         */
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
        BUG_ON(usecnt == 0);

        usecnt--;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);

        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
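/*
 * Worked example of the usage-count handshake above, assuming two PCI
 * functions sharing one IOC firmware image: the first function to load
 * sees usecnt == 0, claims the firmware by writing 1 and clearing
 * ioc_fail_sync; the second sees usecnt == 1, verifies that it carries
 * the same firmware version, and bumps the count to 2.  A version
 * mismatch makes bfa_ioc_ct_firmware_lock() fail instead.
 */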
/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
        writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
        writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
        /* Wait for halt to take effect */
        readl(ioc->ioc_regs.ll_halt);
        readl(ioc->ioc_regs.alt_ll_halt);
}

/* Host to LPU mailbox message addresses */
static const struct {
        u32 hfn_mbox;
        u32 lpu_mbox;
        u32 hfn_pgn;
} ct_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
        u32 hfn;
        u32 lpu;
} ct_p0reg[] = {
        { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
        u32 hfn;
        u32 lpu;
} ct_p1reg[] = {
        { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static const struct {
        u32 hfn_mbox;
        u32 lpu_mbox;
        u32 hfn_pgn;
        u32 hfn;
        u32 lpu;
        u32 lpu_read;
} ct2_reg[] = {
        { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU0_READ_STAT },
        { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU1_READ_STAT },
};

static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg : for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
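/*
 * Note on the three hardware semaphores mapped by both register-init
 * variants: ioc_sem_reg (HOST_SEM0) is the IOC ownership semaphore,
 * ioc_usage_sem_reg (HOST_SEM1) guards the read-modify-write of the
 * firmware usage count (see bfa_ioc_ct_firmware_lock()), and
 * ioc_init_sem_reg (HOST_SEM2) is used by the common IOC code to
 * serialize one-time hardware init; the roles of SEM0/SEM2 are a
 * reading of the shared IOC code, not enforced in this file.
 */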
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int port = bfa_ioc_portid(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
        ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
        ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
        ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

        if (port == 0) {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg : for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
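/*
 * Port mapping on CT derives the port id from the per-function
 * personality register.  Each PCI function owns an 8-bit field,
 * selected by FNC_PERS_FN_SHIFT(fn) = fn * 8; e.g. for PCI function 2
 * the field occupies bits 16..23 of FNC_PERS_REG.
 */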
/* Initialize IOC to port mapping. */

#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        /*
         * For catapult, base port id on personality register and IOC type
         */
        r32 = readl(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
        ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}

/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32, mode;

        r32 = readl(rb + FNC_PERS_REG);

        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /*
         * If already in desired mode, do not change anything
         */
        if ((!msix && mode) || (msix && !mode))
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

        writel(r32, rb + FNC_PERS_REG);
}

static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
        u32 r32;

        r32 = readl(ioc->ioc_regs.lpu_read_stat);
        if (r32) {
                writel(1, ioc->ioc_regs.lpu_read_stat);
                return true;
        }

        return false;
}

/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT             64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR   0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT       0x3013c
#define __MSIX_VT_NUMVT__MK             0x003ff800
#define __MSIX_VT_NUMVT__SH             11
#define __MSIX_VT_NUMVT_(_v)            ((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_                 0x000007ff
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        if (r32 & __MSIX_VT_NUMVT__MK) {
                writel(r32 & __MSIX_VT_OFST_,
                       rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
                return;
        }

        writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
               HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
               rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
               rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(0, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

        /*
         * Read the hw sem reg to make sure that it is locked
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        bfa_nw_ioc_hw_sem_release(ioc);
}
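/*
 * The sync routines below coordinate failure recovery through the
 * ioc_fail_sync register: bits 0..15 hold the per-function "sync
 * acked" mask and bits 16..31 the "sync required" mask (see the
 * bfa_ioc_ct_sync_* macros at the top of this file).  For example,
 * PCI function 2 uses bfa_ioc_ct_sync_pos() == 0x0004 for its ack
 * bit and bfa_ioc_ct_sync_reqd_pos() == 0x00040000 for its
 * required bit.
 */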
/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

        /*
         * Driver load time.  If the sync required bit for this PCI fn
         * is set, it is due to an unclean exit by the driver for this
         * PCI fn in the previous incarnation. Whoever comes here first
         * should clean it up, no matter which PCI fn.
         */
        if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        return bfa_ioc_ct_sync_complete(ioc);
}

static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

        writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
                        bfa_ioc_ct_sync_pos(ioc);

        writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

        writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);
}
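/*
 * Protocol note: sync_join sets this function's "required" bit,
 * sync_ack sets its "acked" bit, and sync_leave clears both.  The
 * completion check below treats recovery as done once every function
 * that joined has also acked, i.e. sync_reqd == sync_ackd.
 */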
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
        u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
        u32 tmp_ackd;

        if (sync_ackd == 0)
                return true;

        /*
         * The check below is to see whether any other PCI fn
         * has reinitialized the ASIC (reset sync_ackd bits)
         * and failed again while this IOC was waiting for hw
         * semaphore (in bfa_iocpf_sm_semwait()).
         */
        tmp_ackd = sync_ackd;
        if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
            !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
                sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

        if (sync_reqd == sync_ackd) {
                writel(bfa_ioc_ct_clear_sync_ackd(r32),
                       ioc->ioc_regs.ioc_fail_sync);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        /*
         * If another PCI fn reinitialized and failed again while
         * this IOC was waiting for hw sem, the sync_ackd bit for
         * this IOC needs to be set again to allow reinitialization.
         */
        if (tmp_ackd != sync_ackd)
                writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

        return false;
}

static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
                               enum bfi_ioc_state fwstate)
{
        writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
{
        return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
                               enum bfi_ioc_state fwstate)
{
        writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
{
        return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
        u32 pll_sclk, pll_fclk, r32;
        bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

        pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
                __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
                __APP_PLL_SCLK_JITLMT0_1(3U) |
                __APP_PLL_SCLK_CNTLMT0_1(1U);
        pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
                __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
                __APP_PLL_LCLK_JITLMT0_1(3U) |
                __APP_PLL_LCLK_CNTLMT0_1(1U);

        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL |
                       __APP_EMS_REFCKBUFEN2 |
                       __APP_EMS_CHANNEL_SEL,
                       (rb + ETH_MAC_SER_REG));
        } else {
                writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
                writel(__APP_EMS_REFCKBUFEN1,
                       (rb + ETH_MAC_SER_REG));
        }
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(pll_sclk |
               __APP_PLL_SCLK_LOGIC_SOFT_RESET,
               rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
               __APP_PLL_LCLK_LOGIC_SOFT_RESET,
               rb + APP_PLL_LCLK_CTL_REG);
        writel(pll_sclk |
               __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
               rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
               __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
               rb + APP_PLL_LCLK_CTL_REG);
        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(pll_sclk |
               __APP_PLL_SCLK_ENABLE,
               rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
               __APP_PLL_LCLK_ENABLE,
               rb + APP_PLL_LCLK_CTL_REG);

        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
        }
        r32 = readl(rb + PSS_CTL_REG);
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);
        if (!fcmode) {
                writel(0, (rb + PMM_1T_RESET_REG_P0));
                writel(0, (rb + PMM_1T_RESET_REG_P1));
        }

        writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
        udelay(1000);
        r32 = readl(rb + MBIST_STAT_REG);
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
}
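/*
 * The CT2 clock bring-up below follows the same basic pattern as
 * bfa_ioc_ct_pll_init() above: hold the PLLs in reset, program the
 * clock selects, wait for lock (a fixed 1 ms delay is used rather
 * than polling the lock bit), then release the soft resets.
 */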
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put s_clk PLL and PLL FSM in reset
         */
        r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
        r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
        r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * Ignore mode and program for the max clock (which is FC16)
         * Firmware/NFC will do the PLL init appropriately
         */
        r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
        r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * while doing PLL init don't clock gate ethernet subsystem
         */
        r32 = readl(rb + CT2_CHIP_MISC_PRG);
        writel(r32 | __ETH_CLK_ENABLE_PORT0,
               rb + CT2_CHIP_MISC_PRG);

        r32 = readl(rb + CT2_PCIE_MISC_REG);
        writel(r32 | __ETH_CLK_ENABLE_PORT1,
               rb + CT2_PCIE_MISC_REG);

        /*
         * set sclk value
         */
        r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
        r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
                __APP_PLL_SCLK_CLK_DIV2);
        writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG);

        /*
         * poll for s_clk lock or delay 1ms
         */
        udelay(1000);

        /*
         * Don't do clock gating for ethernet subsystem, firmware/NFC will
         * do this appropriately
         */
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put l_clk PLL and PLL FSM in reset
         */
        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
        r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
        r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET);
        writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

        /*
         * set LPU speed (set for FC16 which will work for other modes)
         */
        r32 = readl(rb + CT2_CHIP_MISC_PRG);
        writel(r32, (rb + CT2_CHIP_MISC_PRG));

        /*
         * set LPU half speed (set for FC16 which will work for other modes)
         */
        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
        writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

        /*
         * set lclk for mode (set for FC16)
         */
        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
        r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
        r32 |= 0x20c1731b;
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * poll for l_clk lock or delay 1ms
         */
        udelay(1000);
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
        u32 r32;

        r32 = readl(rb + PSS_CTL_REG);
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, rb + PSS_CTL_REG);
        udelay(1000);

        writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG);
        udelay(1000);
        writel(0, rb + CT2_MBIST_CTL_REG);
}

static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
        volatile u32 r32;

        bfa_ioc_ct2_sclk_init(rb);
        bfa_ioc_ct2_lclk_init(rb);

        /*
         * release soft reset on s_clk
         */
        r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
        writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
               rb + CT2_APP_PLL_SCLK_CTL_REG);

        /*
         * release soft reset on l_clk
         */
        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
        writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
               rb + CT2_APP_PLL_LCLK_CTL_REG);

        /* put port0, port1 MAC & AHB in reset */
        writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
               rb + CT2_CSI_MAC_CONTROL_REG(0));
        writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
               rb + CT2_CSI_MAC_CONTROL_REG(1));
}
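/*
 * CT2 PLL init policy, as implemented below: when the WGN status and
 * the NFC firmware version (read from CT2_RSC_GPR15_REG) indicate a
 * usable NFC controller, PLL setup is delegated to NFC through
 * __RESET_AND_START_SCLK_LCLK_PLLS; otherwise NFC is halted and the
 * host programs the s_clk/l_clk PLLs directly via the helpers above.
 */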
#define CT2_NFC_MAX_DELAY       1000
#define CT2_NFC_VER_VALID       0x143
#define BFA_IOC_PLL_POLL        1000000

static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
        volatile u32 r32;

        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
        if (r32 & __NFC_CONTROLLER_HALTED)
                return true;

        return false;
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
        volatile u32 r32;
        int i;

        writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
        for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                if (!(r32 & __NFC_CONTROLLER_HALTED))
                        return;
                udelay(1000);
        }
        BUG_ON(1);
}

static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
        volatile u32 wgn, r32;
        u32 nfc_ver, i;

        wgn = readl(rb + CT2_WGN_STATUS);

        nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

        if (wgn == (__A2T_AHB_LOAD | __WGN_READY) &&
            nfc_ver >= CT2_NFC_VER_VALID) {
                if (bfa_ioc_ct2_nfc_halted(rb))
                        bfa_ioc_ct2_nfc_resume(rb);
                writel(__RESET_AND_START_SCLK_LCLK_PLLS,
                       rb + CT2_CSI_FW_CTL_SET_REG);

                for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
                        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                        if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
                                break;
                }
                BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

                for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
                        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                        if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
                                break;
                }
                BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
                udelay(1000);

                r32 = readl(rb + CT2_CSI_FW_CTL_REG);
                BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
        } else {
                writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
                for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                        if (r32 & __NFC_CONTROLLER_HALTED)
                                break;
                        udelay(1000);
                }

                bfa_ioc_ct2_mac_reset(rb);
                bfa_ioc_ct2_sclk_init(rb);
                bfa_ioc_ct2_lclk_init(rb);

                /* release soft reset on s_clk & l_clk */
                r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
                writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
                       rb + CT2_APP_PLL_SCLK_CTL_REG);
                r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
                       rb + CT2_APP_PLL_LCLK_CTL_REG);
        }

        /* Announce flash device presence, if flash was corrupted. */
        if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
                r32 = readl(rb + PSS_GPIO_OUT_REG);
                writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
                r32 = readl(rb + PSS_GPIO_OE_REG);
                writel(r32 | 1, rb + PSS_GPIO_OE_REG);
        }

        /*
         * Mask the interrupts and clear any
         * pending interrupts left by BIOS/EFI
         */
        writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK);
        writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK);

        /* For first time initialization, no need to clear interrupts */
        r32 = readl(rb + HOST_SEM5_REG);
        if (r32 & 0x1) {
                r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
                if (r32 == 1) {
                        writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
                        readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
                }
                r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
                if (r32 == 1) {
                        writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
                        readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
                }
        }

        bfa_ioc_ct2_mem_init(rb);

        writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
        writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
        return BFA_STATUS_OK;
}