Lines matching refs:ioc (CT/CT2 IOC hardware-interface routines)

static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
			struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
			struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
	ioc->ioc_hwif = &nw_hwif_ct;

bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
	ioc->ioc_hwif = &nw_hwif_ct2;

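The two setters above do nothing but point ioc->ioc_hwif at an ASIC-specific operations table, so the common IOC code can drive CT and CT2 silicon through the same indirection. Here is a minimal userspace sketch of that function-pointer dispatch pattern; struct my_hwif, struct my_ioc and their members are invented for illustration and are not the driver's bfa_ioc_hwif layout.

/* --- illustrative sketch, not driver code --- */
#include <stdio.h>

struct my_ioc;				/* forward declaration */

struct my_hwif {			/* hypothetical ops table */
	void (*reg_init)(struct my_ioc *ioc);
	void (*map_port)(struct my_ioc *ioc);
};

struct my_ioc {
	const struct my_hwif *hwif;	/* ASIC-specific ops */
	int port_id;
};

static void ct_reg_init(struct my_ioc *ioc)  { (void)ioc; puts("ct: map registers"); }
static void ct_map_port(struct my_ioc *ioc)  { ioc->port_id = 0; }
static void ct2_reg_init(struct my_ioc *ioc) { (void)ioc; puts("ct2: map registers"); }
static void ct2_map_port(struct my_ioc *ioc) { ioc->port_id = 1; }

static const struct my_hwif hwif_ct  = { ct_reg_init,  ct_map_port  };
static const struct my_hwif hwif_ct2 = { ct2_reg_init, ct2_map_port };

/* analogous to bfa_nw_ioc_set_ct_hwif()/bfa_nw_ioc_set_ct2_hwif() */
static void set_ct_hwif(struct my_ioc *ioc)  { ioc->hwif = &hwif_ct;  }
static void set_ct2_hwif(struct my_ioc *ioc) { ioc->hwif = &hwif_ct2; }

int main(void)
{
	struct my_ioc ioc;

	set_ct_hwif(&ioc);		/* older ASIC: CT ops */
	ioc.hwif->reg_init(&ioc);
	ioc.hwif->map_port(&ioc);
	printf("CT  port %d\n", ioc.port_id);

	set_ct2_hwif(&ioc);		/* newer ASIC: CT2 ops, same call sites */
	ioc.hwif->reg_init(&ioc);
	ioc.hwif->map_port(&ioc);
	printf("CT2 port %d\n", ioc.port_id);
	return 0;
}
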
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	writel(1, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_fail_sync);
	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

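bfa_ioc_ct_firmware_lock() arbitrates firmware download between the PCI functions with a use count kept in a device scratch register (ioc_usage_reg) and serialized by a hardware semaphore (ioc_usage_sem_reg): the first function to come up sees a count of zero, claims the download and writes the count to 1; later functions check that the running firmware matches their own image and, if so, bump the count, while bfa_ioc_ct_firmware_unlock() below decrements it again. The sketch that follows is a rough userspace model of that protocol, with an atomic flag standing in for the hardware semaphore and plain ints for the registers; hw_sem, usage_cnt and fw_matches_image are invented names.

/* --- illustrative sketch of the use-count protocol, not driver code --- */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag hw_sem = ATOMIC_FLAG_INIT;	/* stands in for ioc_usage_sem_reg */
static int usage_cnt;				/* stands in for ioc_usage_reg */

static void sem_get(void)     { while (atomic_flag_test_and_set(&hw_sem)) ; }
static void sem_release(void) { atomic_flag_clear(&hw_sem); }

/* roughly the decision bfa_ioc_ct_firmware_lock() makes for each PCI function */
static bool firmware_lock(bool fw_matches_image)
{
	bool locked;

	sem_get();
	if (usage_cnt == 0) {
		usage_cnt = 1;		/* first function: owns the download */
		locked = true;
	} else if (fw_matches_image) {
		usage_cnt++;		/* join the already-running firmware */
		locked = true;
	} else {
		locked = false;		/* mismatch: caller must back off */
	}
	sem_release();
	return locked;
}

/* roughly what bfa_ioc_ct_firmware_unlock() does */
static void firmware_unlock(void)
{
	sem_get();
	if (usage_cnt > 0)
		usage_cnt--;
	sem_release();
}

int main(void)
{
	printf("fn0 lock: %d (count=%d)\n", firmware_lock(true), usage_cnt);
	printf("fn1 lock: %d (count=%d)\n", firmware_lock(true), usage_cnt);
	firmware_unlock();
	printf("after unlock: count=%d\n", usage_cnt);
	return 0;
}
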
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);

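bfa_ioc_ct_notify_fail() halts the link-layer processor of both the local and the alternate port, then reads the same registers back; reading a device register after a write is the standard way to flush posted PCI writes, so the halt presumably reaches the hardware before the caller continues. A tiny sketch of the write-then-read-back idiom, with mmio_write32()/mmio_read32() as hypothetical stand-ins for writel()/readl():

/* --- illustrative sketch of the posted-write flush idiom, not driver code --- */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_halt_reg;				/* pretend MMIO register */

static void mmio_write32(uint32_t v, uint32_t *reg)	{ *reg = v; }
static uint32_t mmio_read32(const uint32_t *reg)	{ return *reg; }

int main(void)
{
	mmio_write32(1, &fake_halt_reg);	/* post the halt request */
	(void)mmio_read32(&fake_halt_reg);	/* read back: on real hardware this forces
						 * the posted write out before we go on */
	printf("halt reg = %u\n", fake_halt_reg);
	return 0;
}
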
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
	int pcifn = bfa_ioc_pcifn(ioc);
	rb = bfa_ioc_bar0(ioc);
	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);

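bfa_ioc_ct_reg_init() resolves every register address once as BAR0 plus a fixed offset, taking the per-PCI-function mailbox offsets from the ct_fnreg/ct_p0reg/ct_p1reg tables and mirroring the port-dependent registers: one port's ioc_fwstate and ll_halt are the other port's alt_ioc_fwstate and alt_ll_halt, which is what lets routines such as bfa_ioc_ct_notify_fail() reach the peer port. A small sketch of that mirrored mapping follows; the offsets and the struct are made up.

/* --- illustrative sketch of mirrored per-port register mapping, not driver code --- */
#include <stdint.h>
#include <stdio.h>

#define IOC0_STATE_OFF	0x100u		/* made-up offsets */
#define IOC1_STATE_OFF	0x104u
#define HALT_P0_OFF	0x200u
#define HALT_P1_OFF	0x204u

struct regs {
	uintptr_t ioc_fwstate;		/* this port's firmware state */
	uintptr_t alt_ioc_fwstate;	/* the other port's state */
	uintptr_t ll_halt;
	uintptr_t alt_ll_halt;
};

static void map_regs(struct regs *r, uintptr_t bar0, int port_id)
{
	if (port_id == 0) {
		r->ioc_fwstate     = bar0 + IOC0_STATE_OFF;
		r->alt_ioc_fwstate = bar0 + IOC1_STATE_OFF;
		r->ll_halt         = bar0 + HALT_P0_OFF;
		r->alt_ll_halt     = bar0 + HALT_P1_OFF;
	} else {			/* port 1: same registers, roles swapped */
		r->ioc_fwstate     = bar0 + IOC1_STATE_OFF;
		r->alt_ioc_fwstate = bar0 + IOC0_STATE_OFF;
		r->ll_halt         = bar0 + HALT_P1_OFF;
		r->alt_ll_halt     = bar0 + HALT_P0_OFF;
	}
}

int main(void)
{
	struct regs r;

	map_regs(&r, 0x1000, 1);
	printf("fwstate=%#lx alt=%#lx\n",
	       (unsigned long)r.ioc_fwstate, (unsigned long)r.alt_ioc_fwstate);
	return 0;
}
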
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
	int port = bfa_ioc_portid(ioc);
	rb = bfa_ioc_bar0(ioc);
	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;

bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);

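Both map_port variants recover the port id from a read-only port-map field: read a 32-bit personality register, shift it down by the function's field offset and mask out the port-map bits. A minimal sketch of that extraction, with made-up shift and mask values standing in for FNC_PERS_FN_SHIFT() and __F0_PORT_MAP_MK/__F0_PORT_MAP_SH:

/* --- illustrative sketch of per-function bitfield extraction, not driver code --- */
#include <stdint.h>
#include <stdio.h>

#define FN_FIELD_WIDTH	8u				/* made-up: 8 bits per PCI function */
#define FN_SHIFT(fn)	((fn) * FN_FIELD_WIDTH)
#define PORT_MAP_MK	0x30u				/* made-up port-map mask ... */
#define PORT_MAP_SH	4u				/* ... and its shift inside the field */

static int map_port(uint32_t fnc_pers, int pcifn)
{
	uint32_t field = fnc_pers >> FN_SHIFT(pcifn);	/* select this function's byte */
	return (int)((field & PORT_MAP_MK) >> PORT_MAP_SH);
}

int main(void)
{
	uint32_t r32 = 0x00102030;			/* pretend register contents */
	printf("fn2 -> port %d\n", map_port(r32, 2));	/* prints "port 1" */
	return 0;
}
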
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

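bfa_ioc_ct_isr_mode_set() is a read-modify-write of the same per-function field: clear this function's interrupt-status bits, then OR in the requested mode before writing the word back. A short sketch of the clear-then-set idiom (field width and mask are invented):

/* --- illustrative sketch of a per-function read-modify-write, not driver code --- */
#include <stdint.h>
#include <stdio.h>

#define FN_SHIFT(fn)	((fn) * 8u)	/* made-up: 8 bits per PCI function */
#define MODE_MASK	0xffu		/* made-up width of the mode field */

static uint32_t isr_mode_set(uint32_t r32, int pcifn, uint32_t new_mode)
{
	r32 &= ~(MODE_MASK << FN_SHIFT(pcifn));			/* clear this function's field */
	r32 |= (new_mode & MODE_MASK) << FN_SHIFT(pcifn);	/* then set the new mode */
	return r32;						/* caller writes this back */
}

int main(void)
{
	uint32_t r32 = 0x11223344;
	printf("%#x\n", (unsigned)isr_mode_set(r32, 1, 0x5));	/* -> 0x11220544 */
	return 0;
}
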
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	writel(1, ioc->ioc_regs.lpu_read_stat);

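bfa_ioc_ct2_lpu_read_stat() polls a CT2-specific status register and, when it reads non-zero, writes 1 back to the same address, which looks like the usual write-1-to-clear acknowledgement; the return value tells the caller whether anything was pending. A minimal model of the idiom, with the register simulated by a plain variable:

/* --- illustrative sketch of a write-1-to-clear status register, not driver code --- */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t lpu_read_stat = 1;	/* pretend the LPU latched an event */

static bool check_and_ack(void)
{
	uint32_t r32 = lpu_read_stat;	/* readl(...lpu_read_stat) */
	if (r32) {
		lpu_read_stat = 0;	/* writel(1, ...) acks/clears it on hardware */
		return true;
	}
	return false;
}

int main(void)
{
	printf("first poll:  %d\n", check_and_ack());	/* 1: event was pending */
	printf("second poll: %d\n", check_and_ack());	/* 0: already cleared */
	return 0;
}
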
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	       HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),

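bfa_nw_ioc_ct2_poweron() appears to program the function's MSI-X vector window, and the base vector index it writes is simply HOSTFN_MSIX_DEFAULT multiplied by the PCI function number, so each function gets its own contiguous block of vectors. A trivial sketch of that arithmetic; the real value of HOSTFN_MSIX_DEFAULT is not shown in this listing, so 32 below is only a placeholder:

/* --- illustrative sketch of per-function MSI-X vector numbering, not driver code --- */
#include <stdio.h>

#define MSIX_PER_FN 32	/* placeholder for HOSTFN_MSIX_DEFAULT; real value not shown here */

int main(void)
{
	for (int pcifn = 0; pcifn < 4; pcifn++)
		printf("fn%d: vectors start at %d\n", pcifn, MSIX_PER_FN * pcifn);
	return 0;
}
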
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);

bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
	return bfa_ioc_ct_sync_complete(ioc);

bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);

bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
			bfa_ioc_ct_sync_pos(ioc);
	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);

bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);

bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
		ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
	writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

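The sync_* helpers coordinate the PCI functions through the single ioc_fail_sync scratch register: each function owns one bit, sync_join() advertises the function in a "required" mask, sync_ack() sets its bit in an "acked" mask, sync_leave() clears both, and sync_complete() reports success once every required function has acked (counting its own missing ack as implicitly given, and marking both fwstate registers BFI_IOC_FAIL on completion). The two masks appear to share the one 32-bit word via bfa_ioc_ct_sync_reqd_pos() versus bfa_ioc_ct_sync_pos(); the sketch below assumes a required-in-the-high-half, acked-in-the-low-half layout, which is an assumption rather than the driver's definition.

/* --- illustrative sketch of the fail-sync handshake, not driver code --- */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SYNC_REQD_SH	16u				/* assumed split of the 32-bit word */
#define ack_pos(fn)	(1u << (fn))			/* this function's "acked" bit */
#define reqd_pos(fn)	(ack_pos(fn) << SYNC_REQD_SH)	/* this function's "required" bit */

static uint32_t fail_sync;				/* stands in for ioc_fail_sync */

static void sync_join(int fn)	{ fail_sync |= reqd_pos(fn); }
static void sync_ack(int fn)	{ fail_sync |= ack_pos(fn); }
static void sync_leave(int fn)	{ fail_sync &= ~(reqd_pos(fn) | ack_pos(fn)); }

static bool sync_complete(int fn)
{
	uint32_t reqd = fail_sync >> SYNC_REQD_SH;
	uint32_t ackd = fail_sync & 0xffffu;

	ackd |= ack_pos(fn);		/* our own ack is implied, as in the driver */
	return (ackd & reqd) == reqd;	/* done when every joined function has acked */
}

int main(void)
{
	sync_join(0);
	sync_join(1);
	sync_ack(0);
	printf("fn0 complete? %d\n", sync_complete(0));	/* 0: fn1 has not acked yet */
	sync_ack(1);
	printf("fn0 complete? %d\n", sync_complete(0));	/* 1: all joined functions acked */
	sync_leave(1);
	printf("fail_sync = %#x\n", (unsigned)fail_sync);
	return 0;
}
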
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
	writel(fwstate, ioc->ioc_regs.ioc_fwstate);

bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);

bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);

bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);