Lines Matching +full:m +full:- +full:phy

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
4 * Copyright (c) 2014- QLogic Corporation.
8 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
31 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
33 #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
36 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
38 #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
55 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
57 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
58 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
59 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
61 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
63 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
65 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
67 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
69 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
71 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
73 ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
75 ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
77 ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
79 ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
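The ioc_hwif macros above (lines 55-79) are thin wrappers around a per-ASIC operations table, so the same IOC code can drive the CB, CT and CT2 chip generations selected later in bfa_ioc_pci_init(). A minimal sketch of that indirection, with simplified, invented types rather than the driver's real bfa_ioc_hwif_s:

    /* Illustrative only: a cut-down stand-in for the per-chip ops table. */
    struct ioc;

    struct ioc_hwif {
        void (*ioc_reg_init)(struct ioc *ioc);     /* set up register addresses */
        void (*ioc_map_port)(struct ioc *ioc);     /* discover port mapping */
        void (*ioc_notify_fail)(struct ioc *ioc);  /* tell the peer function */
    };

    struct ioc {
        const struct ioc_hwif *ioc_hwif;           /* chosen once, per ASIC generation */
    };

    /* Wrappers in the same style as the macros in the listing. */
    #define hwif_reg_init(__ioc)    ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
    #define hwif_map_port(__ioc)    ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
    #define hwif_notify_fail(__ioc) ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))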
82 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
83 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
161 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
163 #define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
166 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
170 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
172 #define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
279 * Reset entry actions -- initialize state machine
284 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); in bfa_ioc_sm_reset_entry()
317 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); in bfa_ioc_sm_enabling_entry()
337 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_enabling()
340 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); in bfa_ioc_sm_enabling()
344 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_enabling()
354 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_enabling()
392 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_getattr()
395 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); in bfa_ioc_sm_getattr()
414 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_sm_op_entry()
416 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); in bfa_ioc_sm_op_entry()
442 if (ioc->iocpf.auto_recover) in bfa_ioc_sm_op()
450 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); in bfa_ioc_sm_op()
462 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_sm_disabling_entry()
463 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); in bfa_ioc_sm_disabling_entry()
487 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); in bfa_ioc_sm_disabling()
520 ioc->cbfn->disable_cbfn(ioc->bfa); in bfa_ioc_sm_disabled()
525 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_disabled()
558 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_fail_retry()
561 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); in bfa_ioc_sm_fail_retry()
565 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_fail_retry()
578 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_fail_retry()
604 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_fail()
613 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_fail()
640 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_hwfail()
644 ioc->cbfn->disable_cbfn(ioc->bfa); in bfa_ioc_sm_hwfail()
652 /* Ignore - already in hwfail state */ in bfa_ioc_sm_hwfail()
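The bfa_fsm_set_state()/bfa_fsm_send_event() calls that dominate the IOC handlers above treat each state as a handler function: setting a state stores a function pointer, sending an event calls it, and the *_entry() handlers run on transition. A minimal, runnable sketch of that pattern with invented names (the real macros live elsewhere in the driver and also invoke the entry action):

    #include <stdio.h>

    struct fsm;
    typedef void (*state_fn)(struct fsm *f, int event);

    struct fsm {
        state_fn state;                         /* current state == current handler */
    };

    #define fsm_set_state(_f, _s)   ((_f)->state = (_s))
    #define fsm_send_event(_f, _e)  ((_f)->state((_f), (_e)))

    static void sm_enabled(struct fsm *f, int event);

    static void sm_disabled(struct fsm *f, int event)
    {
        if (event == 1)                         /* hypothetical ENABLE event */
            fsm_set_state(f, sm_enabled);       /* transition = swap the handler */
    }

    static void sm_enabled(struct fsm *f, int event)
    {
        if (event == 2)                         /* hypothetical DISABLE event */
            fsm_set_state(f, sm_disabled);
    }

    int main(void)
    {
        struct fsm f = { .state = sm_disabled };

        fsm_send_event(&f, 1);                  /* disabled -> enabled */
        fsm_send_event(&f, 2);                  /* enabled -> disabled */
        printf("%s\n", f.state == sm_disabled ? "disabled" : "enabled");
        return 0;
    }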
665 * Reset entry actions -- initialize state machine
670 iocpf->fw_mismatch_notified = BFA_FALSE; in bfa_iocpf_sm_reset_entry()
671 iocpf->auto_recover = bfa_auto_recover; in bfa_iocpf_sm_reset_entry()
680 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_reset()
710 r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
713 r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
717 fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc); in bfa_iocpf_sm_fwcheck_entry()
719 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
723 bfa_ioc_fwver_get(iocpf->ioc, &fwhdr); in bfa_iocpf_sm_fwcheck_entry()
726 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
733 pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff); in bfa_iocpf_sm_fwcheck_entry()
734 writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn); in bfa_iocpf_sm_fwcheck_entry()
737 bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0); in bfa_iocpf_sm_fwcheck_entry()
741 bfa_trc(iocpf->ioc, fwstate); in bfa_iocpf_sm_fwcheck_entry()
742 bfa_trc(iocpf->ioc, swab32(fwhdr.exec)); in bfa_iocpf_sm_fwcheck_entry()
743 bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); in bfa_iocpf_sm_fwcheck_entry()
744 bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); in bfa_iocpf_sm_fwcheck_entry()
749 bfa_ioc_ownership_reset(iocpf->ioc); in bfa_iocpf_sm_fwcheck_entry()
754 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
757 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_fwcheck_entry()
766 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_fwcheck()
778 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fwcheck()
782 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fwcheck()
817 if (iocpf->fw_mismatch_notified == BFA_FALSE) in bfa_iocpf_sm_mismatch_entry()
818 bfa_ioc_pf_fwmismatch(iocpf->ioc); in bfa_iocpf_sm_mismatch_entry()
820 iocpf->fw_mismatch_notified = BFA_TRUE; in bfa_iocpf_sm_mismatch_entry()
821 bfa_iocpf_timer_start(iocpf->ioc); in bfa_iocpf_sm_mismatch_entry()
830 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_mismatch()
861 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_semwait_entry()
870 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_semwait()
880 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_semwait()
903 iocpf->poll_time = 0; in bfa_iocpf_sm_hwinit_entry()
904 bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE); in bfa_iocpf_sm_hwinit_entry()
914 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_hwinit()
924 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_hwinit()
932 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_hwinit()
944 bfa_iocpf_timer_start(iocpf->ioc); in bfa_iocpf_sm_enabling_entry()
948 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa); in bfa_iocpf_sm_enabling_entry()
949 bfa_ioc_send_enable(iocpf->ioc); in bfa_iocpf_sm_enabling_entry()
959 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_enabling()
966 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_enabling()
975 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_enabling()
983 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_enabling()
995 bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED); in bfa_iocpf_sm_ready_entry()
1001 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_ready()
1026 bfa_iocpf_timer_start(iocpf->ioc); in bfa_iocpf_sm_disabling_entry()
1027 bfa_ioc_send_disable(iocpf->ioc); in bfa_iocpf_sm_disabling_entry()
1036 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_disabling()
1066 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_disabling_sync_entry()
1075 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_disabling_sync()
1082 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_disabling_sync()
1105 bfa_ioc_mbox_flush(iocpf->ioc); in bfa_iocpf_sm_disabled_entry()
1106 bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED); in bfa_iocpf_sm_disabled_entry()
1112 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_disabled()
1134 bfa_ioc_debug_save_ftrc(iocpf->ioc); in bfa_iocpf_sm_initfail_sync_entry()
1135 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_initfail_sync_entry()
1144 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_initfail_sync()
1153 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_initfail_sync()
1184 bfa_trc(iocpf->ioc, 0); in bfa_iocpf_sm_initfail_entry()
1193 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_initfail()
1218 bfa_ioc_lpu_stop(iocpf->ioc); in bfa_iocpf_sm_fail_sync_entry()
1223 bfa_ioc_mbox_flush(iocpf->ioc); in bfa_iocpf_sm_fail_sync_entry()
1225 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_fail_sync_entry()
1231 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_fail_sync()
1239 if (!iocpf->auto_recover) { in bfa_iocpf_sm_fail_sync()
1242 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fail_sync()
1248 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fail_sync()
1275 bfa_trc(iocpf->ioc, 0); in bfa_iocpf_sm_fail_entry()
1284 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_fail()
1311 list_for_each(qe, &ioc->notify_q) { in bfa_ioc_event_notify()
1313 notify->cbfn(notify->cbarg, event); in bfa_ioc_event_notify()
1320 ioc->cbfn->disable_cbfn(ioc->bfa); in bfa_ioc_disable_comp()
1354 r32 = readl(ioc->ioc_regs.ioc_sem_reg); in bfa_ioc_hw_sem_get()
1357 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR); in bfa_ioc_hw_sem_get()
1361 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); in bfa_ioc_hw_sem_get()
1378 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1386 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1393 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1405 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1416 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_start()
1419 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_start()
1430 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_stop()
1433 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_stop()
1447 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_ioc_fwver_get()
1448 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_fwver_get()
1453 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); in bfa_ioc_fwver_get()
1524 if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i]) in bfa_ioc_fwver_md5_check()
1538 if (drv_fwhdr->signature != fwhdr_to_cmp->signature) in bfa_ioc_fw_ver_compatible()
1541 if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major) in bfa_ioc_fw_ver_compatible()
1544 if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor) in bfa_ioc_fw_ver_compatible()
1547 if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint) in bfa_ioc_fw_ver_compatible()
1550 if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch && in bfa_ioc_fw_ver_compatible()
1551 drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase && in bfa_ioc_fw_ver_compatible()
1552 drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) { in bfa_ioc_fw_ver_compatible()
1562 if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF) in bfa_ioc_flash_fwver_valid()
1570 if (fwhdr->fwver.phase == 0 && in fwhdr_is_ga()
1571 fwhdr->fwver.build == 0) in fwhdr_is_ga()
1587 if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch) in bfa_ioc_fw_ver_patch_cmp()
1590 else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch) in bfa_ioc_fw_ver_patch_cmp()
1608 if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase) in bfa_ioc_fw_ver_patch_cmp()
1610 else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase) in bfa_ioc_fw_ver_patch_cmp()
1613 if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build) in bfa_ioc_fw_ver_patch_cmp()
1615 else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build) in bfa_ioc_fw_ver_patch_cmp()
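The version checks above (lines 1538-1615) split into two stages: bfa_ioc_fw_ver_compatible() requires signature, major, minor and maint to match exactly, while bfa_ioc_fw_ver_patch_cmp() ranks patch, then phase, then build to decide whether a candidate image is better, older or the same. A hedged sketch of that ordering with invented names; the GA special case hinted at by fwhdr_is_ga() is omitted here:

    struct fwver { int major, minor, maint, patch, phase, build; };
    enum fwcmp { FW_OLDER = -1, FW_SAME = 0, FW_BETTER = 1 };

    /* Compare only the "patch level" fields; the caller has already checked
     * that signature, major, minor and maint are identical. */
    static enum fwcmp fwver_patch_cmp(const struct fwver *base, const struct fwver *cand)
    {
        if (cand->patch != base->patch)
            return cand->patch > base->patch ? FW_BETTER : FW_OLDER;
        if (cand->phase != base->phase)
            return cand->phase > base->phase ? FW_BETTER : FW_OLDER;
        if (cand->build != base->build)
            return cand->build > base->build ? FW_BETTER : FW_OLDER;
        return FW_SAME;
    }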
1631 return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, in bfa_ioc_flash_img_get_chnk()
1671 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_ioc_fwsig_invalidate()
1672 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_fwsig_invalidate()
1673 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN); in bfa_ioc_fwsig_invalidate()
1686 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgflush()
1688 writel(1, ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgflush()
1732 * just re-enable IOC. in bfa_ioc_hwinit()
1741 * When using MSI-X any pending firmware ready event should in bfa_ioc_hwinit()
1742 * be flushed. Otherwise MSI-X interrupts are not delivered. in bfa_ioc_hwinit()
1745 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); in bfa_ioc_hwinit()
1781 ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); in bfa_ioc_mbox_send()
1784 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); in bfa_ioc_mbox_send()
1789 writel(1, ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_send()
1790 (void) readl(ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_send()
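bfa_ioc_mbox_send() (lines 1781-1790) copies the message into the host-to-firmware mailbox one 32-bit word at a time, zero-fills the remaining words, then writes the command register and reads it back to flush the posted MMIO write. A simplified sketch of that sequence; the MMIO helpers and mailbox size here are placeholders, not the driver's:

    #include <stdint.h>
    #include <string.h>

    #define MBOX_WORDS 8                          /* assumed mailbox size in u32s */

    /* Hypothetical MMIO helpers standing in for the kernel's writel()/readl(). */
    static void mmio_write32(volatile uint32_t *addr, uint32_t v) { *addr = v; }
    static uint32_t mmio_read32(volatile uint32_t *addr) { return *addr; }

    static void mbox_send(volatile uint32_t *mbox, volatile uint32_t *mbox_cmd,
                          const void *msg, size_t len)
    {
        uint32_t words[MBOX_WORDS] = { 0 };
        size_t i;

        memcpy(words, msg, len < sizeof(words) ? len : sizeof(words));
        for (i = 0; i < MBOX_WORDS; i++)
            mmio_write32(&mbox[i], words[i]);     /* payload, zero padded */

        mmio_write32(mbox_cmd, 1);                /* ring the doorbell */
        (void)mmio_read32(mbox_cmd);              /* flush the posted write */
    }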
1800 enable_req.clscode = cpu_to_be16(ioc->clscode); in bfa_ioc_send_enable()
1801 /* unsigned 32-bit time_t overflow in y2106 */ in bfa_ioc_send_enable()
1813 disable_req.clscode = cpu_to_be16(ioc->clscode); in bfa_ioc_send_disable()
1814 /* unsigned 32-bit time_t overflow in y2106 */ in bfa_ioc_send_disable()
1826 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); in bfa_ioc_send_getattr()
1836 hb_count = readl(ioc->ioc_regs.heartbeat); in bfa_ioc_hb_check()
1837 if (ioc->hb_count == hb_count) { in bfa_ioc_hb_check()
1841 ioc->hb_count = hb_count; in bfa_ioc_hb_check()
1851 ioc->hb_count = readl(ioc->ioc_regs.heartbeat); in bfa_ioc_hb_monitor()
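bfa_ioc_hb_check() (lines 1836-1841) detects a hung firmware by sampling a heartbeat register that the firmware keeps incrementing: if two consecutive timer ticks read the same value, the IOC is declared failed; otherwise the new value is remembered. A small sketch of that idea, names invented:

    #include <stdint.h>
    #include <stdbool.h>

    struct hb_monitor {
        uint32_t last;                  /* heartbeat value seen on the previous tick */
    };

    /* Called from a periodic timer; reg is the current heartbeat register value.
     * Returns true when the firmware appears hung (counter stopped moving). */
    static bool hb_check(struct hb_monitor *m, uint32_t reg)
    {
        if (m->last == reg)
            return true;                /* no progress since the last sample */
        m->last = reg;
        return false;
    }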
1891 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_ioc_download_fw()
1892 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_download_fw()
1918 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, in bfa_ioc_download_fw()
1929 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_download_fw()
1933 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), in bfa_ioc_download_fw()
1934 ioc->ioc_regs.host_page_num_fn); in bfa_ioc_download_fw()
1943 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, in bfa_ioc_download_fw()
1944 ioc->port0_mode, ioc->port1_mode); in bfa_ioc_download_fw()
1945 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF, in bfa_ioc_download_fw()
1947 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF, in bfa_ioc_download_fw()
1949 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF, in bfa_ioc_download_fw()
1961 struct bfi_ioc_attr_s *attr = ioc->attr; in bfa_ioc_getattr_reply()
1963 attr->adapter_prop = be32_to_cpu(attr->adapter_prop); in bfa_ioc_getattr_reply()
1964 attr->card_type = be32_to_cpu(attr->card_type); in bfa_ioc_getattr_reply()
1965 attr->maxfrsize = be16_to_cpu(attr->maxfrsize); in bfa_ioc_getattr_reply()
1966 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); in bfa_ioc_getattr_reply()
1967 attr->mfg_year = be16_to_cpu(attr->mfg_year); in bfa_ioc_getattr_reply()
1978 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_attach()
1981 INIT_LIST_HEAD(&mod->cmd_q); in bfa_ioc_mbox_attach()
1983 mod->mbhdlr[mc].cbfn = NULL; in bfa_ioc_mbox_attach()
1984 mod->mbhdlr[mc].cbarg = ioc->bfa; in bfa_ioc_mbox_attach()
1989 * Mbox poll timer -- restarts any pending mailbox requests.
1994 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_poll()
2001 if (list_empty(&mod->cmd_q)) in bfa_ioc_mbox_poll()
2007 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_poll()
2014 bfa_q_deq(&mod->cmd_q, &cmd); in bfa_ioc_mbox_poll()
2015 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); in bfa_ioc_mbox_poll()
2024 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_flush()
2027 while (!list_empty(&mod->cmd_q)) in bfa_ioc_mbox_flush()
2028 bfa_q_deq(&mod->cmd_q, &cmd); in bfa_ioc_mbox_flush()
2047 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); in bfa_ioc_smem_read()
2056 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { in bfa_ioc_smem_read()
2061 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_read()
2066 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); in bfa_ioc_smem_read()
2076 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_read()
2079 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), in bfa_ioc_smem_read()
2080 ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_read()
2084 readl(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_read()
2085 writel(1, ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_read()
2104 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); in bfa_ioc_smem_clr()
2113 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { in bfa_ioc_smem_clr()
2118 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_clr()
2123 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); in bfa_ioc_smem_clr()
2132 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_clr()
2135 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), in bfa_ioc_smem_clr()
2136 ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_clr()
2141 readl(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_clr()
2142 writel(1, ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_clr()
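bfa_ioc_smem_read() and bfa_ioc_smem_clr() (lines 2047-2142) access shared memory through a sliding page window: a host page register selects which page of SMEM is visible, the loop works within that page, the window is advanced each time the in-page offset wraps, and the default window is restored at the end. A reduced sketch of the pattern; the page size, base-page handling and register accessors are placeholders:

    #include <stdint.h>
    #include <stddef.h>

    #define SMEM_PAGE_SZ 0x8000u                        /* assumed window size */

    /* Placeholders standing in for writel(host_page_num_fn) / bfa_mem_read(). */
    static void smem_select_page(uint32_t pgnum) { (void)pgnum; }
    static uint32_t smem_window_read(uint32_t off_in_page) { (void)off_in_page; return 0; }

    static void smem_read(uint32_t soff, uint32_t *buf, size_t words)
    {
        uint32_t pgnum = soff / SMEM_PAGE_SZ;
        uint32_t loff  = soff % SMEM_PAGE_SZ;
        size_t i;

        smem_select_page(pgnum);
        for (i = 0; i < words; i++) {
            buf[i] = smem_window_read(loff);
            loff += sizeof(uint32_t);
            if (loff == SMEM_PAGE_SZ) {                 /* crossed the window: slide it */
                loff = 0;
                smem_select_page(++pgnum);
            }
        }
        smem_select_page(0);                            /* restore the default window */
    }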
2150 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_fail_notify()
2155 ioc->cbfn->hbfail_cbfn(ioc->bfa); in bfa_ioc_fail_notify()
2169 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_pf_fwmismatch()
2173 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_pf_fwmismatch()
2187 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_pll_init()
2191 ioc->pllinit = BFA_TRUE; in bfa_ioc_pll_init()
2201 readl(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_pll_init()
2202 writel(1, ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_pll_init()
2292 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgget()
2301 r32 = readl(ioc->ioc_regs.lpu_mbox + in bfa_ioc_msgget()
2309 writel(1, ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgget()
2310 readl(ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgget()
2316 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) in bfa_ioc_isr() argument
2319 struct bfa_iocpf_s *iocpf = &ioc->iocpf; in bfa_ioc_isr()
2321 msg = (union bfi_ioc_i2h_msg_u *) m; in bfa_ioc_isr()
2325 switch (msg->mh.msg_id) { in bfa_ioc_isr()
2330 ioc->port_mode = ioc->port_mode_cfg = in bfa_ioc_isr()
2331 (enum bfa_mode_s)msg->fw_event.port_mode; in bfa_ioc_isr()
2332 ioc->ad_cap_bm = msg->fw_event.cap_bm; in bfa_ioc_isr()
2345 bfa_trc(ioc, msg->mh.msg_id); in bfa_ioc_isr()
2360 ioc->bfa = bfa; in bfa_ioc_attach()
2361 ioc->cbfn = cbfn; in bfa_ioc_attach()
2362 ioc->timer_mod = timer_mod; in bfa_ioc_attach()
2363 ioc->fcmode = BFA_FALSE; in bfa_ioc_attach()
2364 ioc->pllinit = BFA_FALSE; in bfa_ioc_attach()
2365 ioc->dbg_fwsave_once = BFA_TRUE; in bfa_ioc_attach()
2366 ioc->iocpf.ioc = ioc; in bfa_ioc_attach()
2369 INIT_LIST_HEAD(&ioc->notify_q); in bfa_ioc_attach()
2382 INIT_LIST_HEAD(&ioc->notify_q); in bfa_ioc_detach()
2394 ioc->clscode = clscode; in bfa_ioc_pci_init()
2395 ioc->pcidev = *pcidev; in bfa_ioc_pci_init()
2400 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; in bfa_ioc_pci_init()
2401 ioc->asic_mode = BFI_ASIC_MODE_FC; in bfa_ioc_pci_init()
2403 switch (pcidev->device_id) { in bfa_ioc_pci_init()
2406 ioc->asic_gen = BFI_ASIC_GEN_CB; in bfa_ioc_pci_init()
2407 ioc->fcmode = BFA_TRUE; in bfa_ioc_pci_init()
2408 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; in bfa_ioc_pci_init()
2409 ioc->ad_cap_bm = BFA_CM_HBA; in bfa_ioc_pci_init()
2413 ioc->asic_gen = BFI_ASIC_GEN_CT; in bfa_ioc_pci_init()
2414 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; in bfa_ioc_pci_init()
2415 ioc->asic_mode = BFI_ASIC_MODE_ETH; in bfa_ioc_pci_init()
2416 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; in bfa_ioc_pci_init()
2417 ioc->ad_cap_bm = BFA_CM_CNA; in bfa_ioc_pci_init()
2421 ioc->asic_gen = BFI_ASIC_GEN_CT; in bfa_ioc_pci_init()
2422 ioc->fcmode = BFA_TRUE; in bfa_ioc_pci_init()
2423 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; in bfa_ioc_pci_init()
2424 ioc->ad_cap_bm = BFA_CM_HBA; in bfa_ioc_pci_init()
2429 ioc->asic_gen = BFI_ASIC_GEN_CT2; in bfa_ioc_pci_init()
2431 pcidev->ssid == BFA_PCI_CT2_SSID_FC) { in bfa_ioc_pci_init()
2432 ioc->asic_mode = BFI_ASIC_MODE_FC16; in bfa_ioc_pci_init()
2433 ioc->fcmode = BFA_TRUE; in bfa_ioc_pci_init()
2434 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; in bfa_ioc_pci_init()
2435 ioc->ad_cap_bm = BFA_CM_HBA; in bfa_ioc_pci_init()
2437 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; in bfa_ioc_pci_init()
2438 ioc->asic_mode = BFI_ASIC_MODE_ETH; in bfa_ioc_pci_init()
2439 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { in bfa_ioc_pci_init()
2440 ioc->port_mode = in bfa_ioc_pci_init()
2441 ioc->port_mode_cfg = BFA_MODE_CNA; in bfa_ioc_pci_init()
2442 ioc->ad_cap_bm = BFA_CM_CNA; in bfa_ioc_pci_init()
2444 ioc->port_mode = in bfa_ioc_pci_init()
2445 ioc->port_mode_cfg = BFA_MODE_NIC; in bfa_ioc_pci_init()
2446 ioc->ad_cap_bm = BFA_CM_NIC; in bfa_ioc_pci_init()
2458 if (ioc->asic_gen == BFI_ASIC_GEN_CB) in bfa_ioc_pci_init()
2460 else if (ioc->asic_gen == BFI_ASIC_GEN_CT) in bfa_ioc_pci_init()
2463 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); in bfa_ioc_pci_init()
2484 ioc->attr_dma.kva = dm_kva; in bfa_ioc_mem_claim()
2485 ioc->attr_dma.pa = dm_pa; in bfa_ioc_mem_claim()
2486 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; in bfa_ioc_mem_claim()
2493 ioc->dbg_fwsave_once = BFA_TRUE; in bfa_ioc_enable()
2508 ioc->dbg_fwsave_once = BFA_TRUE; in bfa_ioc_suspend()
2519 ioc->dbg_fwsave = dbg_fwsave; in bfa_ioc_debug_memclaim()
2520 ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN; in bfa_ioc_debug_memclaim()
2532 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_register()
2536 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; in bfa_ioc_mbox_register()
2546 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_regisr()
2548 mod->mbhdlr[mc].cbfn = cbfn; in bfa_ioc_mbox_regisr()
2549 mod->mbhdlr[mc].cbarg = cbarg; in bfa_ioc_mbox_regisr()
2562 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_queue()
2568 if (!list_empty(&mod->cmd_q)) { in bfa_ioc_mbox_queue()
2569 list_add_tail(&cmd->qe, &mod->cmd_q); in bfa_ioc_mbox_queue()
2576 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_queue()
2578 list_add_tail(&cmd->qe, &mod->cmd_q); in bfa_ioc_mbox_queue()
2583 * mailbox is free -- queue command to firmware in bfa_ioc_mbox_queue()
2585 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); in bfa_ioc_mbox_queue()
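bfa_ioc_mbox_queue() (lines 2562-2585) writes straight to the hardware mailbox only when nothing is already queued and the mailbox busy bit is clear; otherwise the command is parked on cmd_q for the poll timer or the next completion to drain, which preserves ordering. A hedged sketch of that decision with invented types:

    #include <stdbool.h>
    #include <stddef.h>

    struct mbox_cmd { struct mbox_cmd *next; /* message payload omitted */ };

    struct mbox {
        struct mbox_cmd *q_head, *q_tail;      /* pending commands (FIFO) */
    };

    /* Hypothetical stubs: read the hardware busy bit / write the registers. */
    static bool mbox_hw_busy(struct mbox *mb) { (void)mb; return false; }
    static void mbox_hw_send(struct mbox *mb, struct mbox_cmd *cmd) { (void)mb; (void)cmd; }

    static void mbox_queue(struct mbox *mb, struct mbox_cmd *cmd)
    {
        cmd->next = NULL;

        /* Something already queued: keep ordering, append behind it. */
        if (mb->q_head) {
            mb->q_tail->next = cmd;
            mb->q_tail = cmd;
            return;
        }
        /* Queue empty: send now if the mailbox is free, otherwise park it. */
        if (mbox_hw_busy(mb)) {
            mb->q_head = mb->q_tail = cmd;
            return;
        }
        mbox_hw_send(mb, cmd);
    }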
2594 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_isr()
2595 struct bfi_mbmsg_s m; in bfa_ioc_mbox_isr() local
2598 if (bfa_ioc_msgget(ioc, &m)) { in bfa_ioc_mbox_isr()
2602 mc = m.mh.msg_class; in bfa_ioc_mbox_isr()
2604 bfa_ioc_isr(ioc, &m); in bfa_ioc_mbox_isr()
2608 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) in bfa_ioc_mbox_isr()
2611 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); in bfa_ioc_mbox_isr()
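bfa_ioc_mbox_isr() (lines 2594-2611) dispatches each firmware message through a handler table indexed by message class, handling the IOC's own class inline and dropping anything without a registered handler. A compact sketch of the table dispatch, names invented:

    #include <stddef.h>

    #define MC_MAX 8                                 /* assumed number of message classes */

    struct msg { unsigned char msg_class; /* payload omitted */ };

    struct handler {
        void (*cbfn)(void *cbarg, struct msg *m);    /* NULL: class not registered */
        void *cbarg;
    };

    static struct handler handlers[MC_MAX];

    static void mbox_isr_dispatch(struct msg *m)
    {
        unsigned int mc = m->msg_class;

        if (mc >= MC_MAX || handlers[mc].cbfn == NULL)
            return;                                  /* unknown or unclaimed class: drop */

        handlers[mc].cbfn(handlers[mc].cbarg, m);
    }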
2626 ioc->stats.hb_count = ioc->hb_count; in bfa_ioc_error_isr()
2647 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) || in bfa_ioc_fw_mismatch()
2648 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch); in bfa_ioc_fw_mismatch()
2652 * Check if adapter is disabled -- both IOCs should be in a disabled
2667 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { in bfa_ioc_adapter_is_disabled()
2693 ioc_attr = ioc->attr; in bfa_ioc_get_adapter_attr()
2695 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); in bfa_ioc_get_adapter_attr()
2696 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); in bfa_ioc_get_adapter_attr()
2697 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); in bfa_ioc_get_adapter_attr()
2698 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); in bfa_ioc_get_adapter_attr()
2699 memcpy(&ad_attr->vpd, &ioc_attr->vpd, in bfa_ioc_get_adapter_attr()
2702 ad_attr->nports = bfa_ioc_get_nports(ioc); in bfa_ioc_get_adapter_attr()
2703 ad_attr->max_speed = bfa_ioc_speed_sup(ioc); in bfa_ioc_get_adapter_attr()
2705 bfa_ioc_get_adapter_model(ioc, ad_attr->model); in bfa_ioc_get_adapter_attr()
2707 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); in bfa_ioc_get_adapter_attr()
2709 ad_attr->card_type = ioc_attr->card_type; in bfa_ioc_get_adapter_attr()
2710 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); in bfa_ioc_get_adapter_attr()
2712 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) in bfa_ioc_get_adapter_attr()
2713 ad_attr->prototype = 1; in bfa_ioc_get_adapter_attr()
2715 ad_attr->prototype = 0; in bfa_ioc_get_adapter_attr()
2717 ad_attr->pwwn = ioc->attr->pwwn; in bfa_ioc_get_adapter_attr()
2718 ad_attr->mac = bfa_ioc_get_mac(ioc); in bfa_ioc_get_adapter_attr()
2720 ad_attr->pcie_gen = ioc_attr->pcie_gen; in bfa_ioc_get_adapter_attr()
2721 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; in bfa_ioc_get_adapter_attr()
2722 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; in bfa_ioc_get_adapter_attr()
2723 ad_attr->asic_rev = ioc_attr->asic_rev; in bfa_ioc_get_adapter_attr()
2725 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); in bfa_ioc_get_adapter_attr()
2727 ad_attr->cna_capable = bfa_ioc_is_cna(ioc); in bfa_ioc_get_adapter_attr()
2728 ad_attr->trunk_capable = (ad_attr->nports > 1) && in bfa_ioc_get_adapter_attr()
2729 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; in bfa_ioc_get_adapter_attr()
2730 ad_attr->mfg_day = ioc_attr->mfg_day; in bfa_ioc_get_adapter_attr()
2731 ad_attr->mfg_month = ioc_attr->mfg_month; in bfa_ioc_get_adapter_attr()
2732 ad_attr->mfg_year = ioc_attr->mfg_year; in bfa_ioc_get_adapter_attr()
2733 memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN); in bfa_ioc_get_adapter_attr()
2739 if (ioc->clscode == BFI_PCIFN_CLASS_ETH) in bfa_ioc_get_type()
2742 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC); in bfa_ioc_get_type()
2744 return (ioc->attr->port_mode == BFI_PORT_MODE_FC) in bfa_ioc_get_type()
2753 (void *)ioc->attr->brcd_serialnum, in bfa_ioc_get_adapter_serial_num()
2761 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); in bfa_ioc_get_adapter_fw_ver()
2774 chip_rev[3] = '-'; in bfa_ioc_get_pci_chip_rev()
2775 chip_rev[4] = ioc->attr->asic_rev; in bfa_ioc_get_pci_chip_rev()
2783 memcpy(optrom_ver, ioc->attr->optrom_version, in bfa_ioc_get_adapter_optrom_ver()
2803 ioc_attr = ioc->attr; in bfa_ioc_get_adapter_model()
2805 if (bfa_asic_id_ct2(ioc->pcidev.device_id) && in bfa_ioc_get_adapter_model()
2806 (!bfa_mfg_is_mezz(ioc_attr->card_type))) in bfa_ioc_get_adapter_model()
2807 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s", in bfa_ioc_get_adapter_model()
2808 BFA_MFG_NAME, ioc_attr->card_type, nports, "p"); in bfa_ioc_get_adapter_model()
2810 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", in bfa_ioc_get_adapter_model()
2811 BFA_MFG_NAME, ioc_attr->card_type); in bfa_ioc_get_adapter_model()
2818 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); in bfa_ioc_get_state()
2823 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); in bfa_ioc_get_state()
2859 ioc_attr->state = bfa_ioc_get_state(ioc); in bfa_ioc_get_attr()
2860 ioc_attr->port_id = bfa_ioc_portid(ioc); in bfa_ioc_get_attr()
2861 ioc_attr->port_mode = ioc->port_mode; in bfa_ioc_get_attr()
2862 ioc_attr->port_mode_cfg = ioc->port_mode_cfg; in bfa_ioc_get_attr()
2863 ioc_attr->cap_bm = ioc->ad_cap_bm; in bfa_ioc_get_attr()
2865 ioc_attr->ioc_type = bfa_ioc_get_type(ioc); in bfa_ioc_get_attr()
2867 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); in bfa_ioc_get_attr()
2869 ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); in bfa_ioc_get_attr()
2870 ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); in bfa_ioc_get_attr()
2871 ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc)); in bfa_ioc_get_attr()
2872 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); in bfa_ioc_get_attr()
2882 return ioc->attr->fcoe_mac; in bfa_ioc_get_mac()
2884 return ioc->attr->mac; in bfa_ioc_get_mac()
2890 mac_t m; in bfa_ioc_get_mfg_mac() local
2892 m = ioc->attr->mfg_mac; in bfa_ioc_get_mfg_mac()
2893 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type)) in bfa_ioc_get_mfg_mac()
2894 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); in bfa_ioc_get_mfg_mac()
2896 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]), in bfa_ioc_get_mfg_mac()
2899 return m; in bfa_ioc_get_mfg_mac()
2908 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_aen_post()
2919 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; in bfa_ioc_aen_post()
2922 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; in bfa_ioc_aen_post()
2923 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); in bfa_ioc_aen_post()
2926 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); in bfa_ioc_aen_post()
2934 aen_entry->aen_data.ioc.ioc_type = ioc_type; in bfa_ioc_aen_post()
2935 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, in bfa_ioc_aen_post()
2947 if (ioc->dbg_fwsave_len == 0) in bfa_ioc_debug_fwsave()
2951 if (tlen > ioc->dbg_fwsave_len) in bfa_ioc_debug_fwsave()
2952 tlen = ioc->dbg_fwsave_len; in bfa_ioc_debug_fwsave()
2954 memcpy(trcdata, ioc->dbg_fwsave, tlen); in bfa_ioc_debug_fwsave()
2987 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC, in bfa_ioc_send_fwsync()
2989 req->clscode = cpu_to_be16(ioc->clscode); in bfa_ioc_send_fwsync()
3012 fwsync_iter--; in bfa_ioc_fwsync()
3043 dlen = smem_len - loff; in bfa_ioc_debug_fwcore()
3073 if (ioc->stats_busy) { in bfa_ioc_fw_stats_get()
3074 bfa_trc(ioc, ioc->stats_busy); in bfa_ioc_fw_stats_get()
3077 ioc->stats_busy = BFA_TRUE; in bfa_ioc_fw_stats_get()
3082 ioc->stats_busy = BFA_FALSE; in bfa_ioc_fw_stats_get()
3094 if (ioc->stats_busy) { in bfa_ioc_fw_stats_clear()
3095 bfa_trc(ioc, ioc->stats_busy); in bfa_ioc_fw_stats_clear()
3098 ioc->stats_busy = BFA_TRUE; in bfa_ioc_fw_stats_clear()
3103 ioc->stats_busy = BFA_FALSE; in bfa_ioc_fw_stats_clear()
3115 if (ioc->dbg_fwsave_once) { in bfa_ioc_debug_save_ftrc()
3116 ioc->dbg_fwsave_once = BFA_FALSE; in bfa_ioc_debug_save_ftrc()
3117 if (ioc->dbg_fwsave_len) { in bfa_ioc_debug_save_ftrc()
3118 tlen = ioc->dbg_fwsave_len; in bfa_ioc_debug_save_ftrc()
3119 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); in bfa_ioc_debug_save_ftrc()
3131 ioc->stats.hb_count = ioc->hb_count; in bfa_ioc_recover()
3144 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); in bfa_iocpf_timeout()
3163 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); in bfa_ioc_poll_fwinit()
3167 if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV)) in bfa_ioc_poll_fwinit()
3170 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; in bfa_ioc_poll_fwinit()
3189 struct list_head *qh = &mod->timer_q; in bfa_timer_beat()
3202 if (elem->timeout <= BFA_TIMER_FREQ) { in bfa_timer_beat()
3203 elem->timeout = 0; in bfa_timer_beat()
3204 list_del(&elem->qe); in bfa_timer_beat()
3205 list_add_tail(&elem->qe, &timedout_q); in bfa_timer_beat()
3207 elem->timeout -= BFA_TIMER_FREQ; in bfa_timer_beat()
3218 elem->timercb(elem->arg); in bfa_timer_beat()
3231 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer)); in bfa_timer_begin()
3233 timer->timeout = timeout; in bfa_timer_begin()
3234 timer->timercb = timercb; in bfa_timer_begin()
3235 timer->arg = arg; in bfa_timer_begin()
3237 list_add_tail(&timer->qe, &mod->timer_q); in bfa_timer_begin()
3246 WARN_ON(list_empty(&timer->qe)); in bfa_timer_stop()
3248 list_del(&timer->qe); in bfa_timer_stop()
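bfa_timer_beat()/bfa_timer_begin()/bfa_timer_stop() (lines 3189-3248) implement a simple software timer list: on every BFA_TIMER_FREQ tick each queued timer's remaining time is decremented, expired entries are moved onto a local list, and their callbacks run only after the walk, so a callback may safely re-arm or stop timers. A condensed sketch of that scheme using a singly linked list and invented names:

    #define TICK_MS 100                            /* stand-in for BFA_TIMER_FREQ */

    struct timer {
        struct timer *next;
        unsigned int timeout;                      /* milliseconds remaining */
        void (*cb)(void *arg);
        void *arg;
    };

    static struct timer *timer_q;                  /* active timers */

    static void timer_begin(struct timer *t, void (*cb)(void *), void *arg,
                            unsigned int timeout)
    {
        t->timeout = timeout;
        t->cb = cb;
        t->arg = arg;
        t->next = timer_q;
        timer_q = t;
    }

    /* Called once per tick: age every timer, then fire the expired ones. */
    static void timer_beat(void)
    {
        struct timer *t, **pp = &timer_q, *expired = 0;

        while ((t = *pp) != 0) {
            if (t->timeout <= TICK_MS) {
                *pp = t->next;                     /* unlink from the active list */
                t->timeout = 0;
                t->next = expired;
                expired = t;
            } else {
                t->timeout -= TICK_MS;
                pp = &t->next;
            }
        }
        while ((t = expired) != 0) {               /* callbacks run after the walk */
            expired = t->next;
            t->cb(t->arg);
        }
    }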
3262 cfg_inst = &cfg->inst[i]; in bfa_ablk_config_swap()
3264 be16 = cfg_inst->pf_cfg[j].pers; in bfa_ablk_config_swap()
3265 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16); in bfa_ablk_config_swap()
3266 be16 = cfg_inst->pf_cfg[j].num_qpairs; in bfa_ablk_config_swap()
3267 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); in bfa_ablk_config_swap()
3268 be16 = cfg_inst->pf_cfg[j].num_vectors; in bfa_ablk_config_swap()
3269 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); in bfa_ablk_config_swap()
3270 be16 = cfg_inst->pf_cfg[j].bw_min; in bfa_ablk_config_swap()
3271 cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16); in bfa_ablk_config_swap()
3272 be16 = cfg_inst->pf_cfg[j].bw_max; in bfa_ablk_config_swap()
3273 cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16); in bfa_ablk_config_swap()
3285 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK); in bfa_ablk_isr()
3286 bfa_trc(ablk->ioc, msg->mh.msg_id); in bfa_ablk_isr()
3288 switch (msg->mh.msg_id) { in bfa_ablk_isr()
3290 if (rsp->status == BFA_STATUS_OK) { in bfa_ablk_isr()
3291 memcpy(ablk->cfg, ablk->dma_addr.kva, in bfa_ablk_isr()
3293 bfa_ablk_config_swap(ablk->cfg); in bfa_ablk_isr()
3294 ablk->cfg = NULL; in bfa_ablk_isr()
3301 ablk->ioc->port_mode_cfg = rsp->port_mode; in bfa_ablk_isr()
3308 /* No-op */ in bfa_ablk_isr()
3312 *(ablk->pcifn) = rsp->pcifn; in bfa_ablk_isr()
3313 ablk->pcifn = NULL; in bfa_ablk_isr()
3320 ablk->busy = BFA_FALSE; in bfa_ablk_isr()
3321 if (ablk->cbfn) { in bfa_ablk_isr()
3322 cbfn = ablk->cbfn; in bfa_ablk_isr()
3323 ablk->cbfn = NULL; in bfa_ablk_isr()
3324 cbfn(ablk->cbarg, rsp->status); in bfa_ablk_isr()
3333 bfa_trc(ablk->ioc, event); in bfa_ablk_notify()
3337 WARN_ON(ablk->busy != BFA_FALSE); in bfa_ablk_notify()
3343 ablk->pcifn = NULL; in bfa_ablk_notify()
3344 if (ablk->busy) { in bfa_ablk_notify()
3345 if (ablk->cbfn) in bfa_ablk_notify()
3346 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED); in bfa_ablk_notify()
3347 ablk->cbfn = NULL; in bfa_ablk_notify()
3348 ablk->busy = BFA_FALSE; in bfa_ablk_notify()
3367 ablk->dma_addr.kva = dma_kva; in bfa_ablk_memclaim()
3368 ablk->dma_addr.pa = dma_pa; in bfa_ablk_memclaim()
3374 ablk->ioc = ioc; in bfa_ablk_attach()
3376 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk); in bfa_ablk_attach()
3377 bfa_q_qe_init(&ablk->ioc_notify); in bfa_ablk_attach()
3378 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk); in bfa_ablk_attach()
3379 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q); in bfa_ablk_attach()
3386 struct bfi_ablk_h2i_query_s *m; in bfa_ablk_query() local
3390 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_query()
3391 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_query()
3395 if (ablk->busy) { in bfa_ablk_query()
3396 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_query()
3400 ablk->cfg = ablk_cfg; in bfa_ablk_query()
3401 ablk->cbfn = cbfn; in bfa_ablk_query()
3402 ablk->cbarg = cbarg; in bfa_ablk_query()
3403 ablk->busy = BFA_TRUE; in bfa_ablk_query()
3405 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg; in bfa_ablk_query()
3406 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY, in bfa_ablk_query()
3407 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_query()
3408 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa); in bfa_ablk_query()
3409 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_query()
3420 struct bfi_ablk_h2i_pf_req_s *m; in bfa_ablk_pf_create() local
3422 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_pf_create()
3423 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_pf_create()
3427 if (ablk->busy) { in bfa_ablk_pf_create()
3428 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_pf_create()
3432 ablk->pcifn = pcifn; in bfa_ablk_pf_create()
3433 ablk->cbfn = cbfn; in bfa_ablk_pf_create()
3434 ablk->cbarg = cbarg; in bfa_ablk_pf_create()
3435 ablk->busy = BFA_TRUE; in bfa_ablk_pf_create()
3437 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; in bfa_ablk_pf_create()
3438 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, in bfa_ablk_pf_create()
3439 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_pf_create()
3440 m->pers = cpu_to_be16((u16)personality); in bfa_ablk_pf_create()
3441 m->bw_min = cpu_to_be16(bw_min); in bfa_ablk_pf_create()
3442 m->bw_max = cpu_to_be16(bw_max); in bfa_ablk_pf_create()
3443 m->port = port; in bfa_ablk_pf_create()
3444 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_pf_create()
3453 struct bfi_ablk_h2i_pf_req_s *m; in bfa_ablk_pf_delete() local
3455 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_pf_delete()
3456 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_pf_delete()
3460 if (ablk->busy) { in bfa_ablk_pf_delete()
3461 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_pf_delete()
3465 ablk->cbfn = cbfn; in bfa_ablk_pf_delete()
3466 ablk->cbarg = cbarg; in bfa_ablk_pf_delete()
3467 ablk->busy = BFA_TRUE; in bfa_ablk_pf_delete()
3469 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; in bfa_ablk_pf_delete()
3470 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE, in bfa_ablk_pf_delete()
3471 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_pf_delete()
3472 m->pcifn = (u8)pcifn; in bfa_ablk_pf_delete()
3473 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_pf_delete()
3482 struct bfi_ablk_h2i_cfg_req_s *m; in bfa_ablk_adapter_config() local
3484 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_adapter_config()
3485 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_adapter_config()
3489 if (ablk->busy) { in bfa_ablk_adapter_config()
3490 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_adapter_config()
3494 ablk->cbfn = cbfn; in bfa_ablk_adapter_config()
3495 ablk->cbarg = cbarg; in bfa_ablk_adapter_config()
3496 ablk->busy = BFA_TRUE; in bfa_ablk_adapter_config()
3498 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; in bfa_ablk_adapter_config()
3499 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG, in bfa_ablk_adapter_config()
3500 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_adapter_config()
3501 m->mode = (u8)mode; in bfa_ablk_adapter_config()
3502 m->max_pf = (u8)max_pf; in bfa_ablk_adapter_config()
3503 m->max_vf = (u8)max_vf; in bfa_ablk_adapter_config()
3504 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_adapter_config()
3513 struct bfi_ablk_h2i_cfg_req_s *m; in bfa_ablk_port_config() local
3515 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_port_config()
3516 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_port_config()
3520 if (ablk->busy) { in bfa_ablk_port_config()
3521 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_port_config()
3525 ablk->cbfn = cbfn; in bfa_ablk_port_config()
3526 ablk->cbarg = cbarg; in bfa_ablk_port_config()
3527 ablk->busy = BFA_TRUE; in bfa_ablk_port_config()
3529 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; in bfa_ablk_port_config()
3530 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG, in bfa_ablk_port_config()
3531 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_port_config()
3532 m->port = (u8)port; in bfa_ablk_port_config()
3533 m->mode = (u8)mode; in bfa_ablk_port_config()
3534 m->max_pf = (u8)max_pf; in bfa_ablk_port_config()
3535 m->max_vf = (u8)max_vf; in bfa_ablk_port_config()
3536 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_port_config()
3545 struct bfi_ablk_h2i_pf_req_s *m; in bfa_ablk_pf_update() local
3547 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_pf_update()
3548 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_pf_update()
3552 if (ablk->busy) { in bfa_ablk_pf_update()
3553 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_pf_update()
3557 ablk->cbfn = cbfn; in bfa_ablk_pf_update()
3558 ablk->cbarg = cbarg; in bfa_ablk_pf_update()
3559 ablk->busy = BFA_TRUE; in bfa_ablk_pf_update()
3561 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; in bfa_ablk_pf_update()
3562 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, in bfa_ablk_pf_update()
3563 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_pf_update()
3564 m->pcifn = (u8)pcifn; in bfa_ablk_pf_update()
3565 m->bw_min = cpu_to_be16(bw_min); in bfa_ablk_pf_update()
3566 m->bw_max = cpu_to_be16(bw_max); in bfa_ablk_pf_update()
3567 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_pf_update()
3575 struct bfi_ablk_h2i_optrom_s *m; in bfa_ablk_optrom_en() local
3577 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_optrom_en()
3578 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_optrom_en()
3582 if (ablk->busy) { in bfa_ablk_optrom_en()
3583 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_optrom_en()
3587 ablk->cbfn = cbfn; in bfa_ablk_optrom_en()
3588 ablk->cbarg = cbarg; in bfa_ablk_optrom_en()
3589 ablk->busy = BFA_TRUE; in bfa_ablk_optrom_en()
3591 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; in bfa_ablk_optrom_en()
3592 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE, in bfa_ablk_optrom_en()
3593 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_optrom_en()
3594 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_optrom_en()
3602 struct bfi_ablk_h2i_optrom_s *m; in bfa_ablk_optrom_dis() local
3604 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_optrom_dis()
3605 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_optrom_dis()
3609 if (ablk->busy) { in bfa_ablk_optrom_dis()
3610 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_optrom_dis()
3614 ablk->cbfn = cbfn; in bfa_ablk_optrom_dis()
3615 ablk->cbarg = cbarg; in bfa_ablk_optrom_dis()
3616 ablk->busy = BFA_TRUE; in bfa_ablk_optrom_dis()
3618 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; in bfa_ablk_optrom_dis()
3619 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE, in bfa_ablk_optrom_dis()
3620 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_optrom_dis()
3621 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_optrom_dis()
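Every bfa_ablk_* request above follows the same handshake: fail fast if the IOC is not operational, fail with a busy status if a previous request is still outstanding, then save the caller's callback, mark the block busy, build the mailbox message and queue it; bfa_ablk_isr() later clears busy and invokes the saved callback with the firmware status. A compact sketch of that request/complete pattern with invented types and status codes:

    #include <stdbool.h>
    #include <stddef.h>

    typedef void (*ablk_cbfn)(void *cbarg, int status);

    struct ablk {
        bool ioc_operational;
        bool busy;                 /* one outstanding request at a time */
        ablk_cbfn cbfn;
        void *cbarg;
    };

    /* Hypothetical stand-in for building + queueing the mailbox message. */
    static void ablk_msg_queue(struct ablk *ablk, int opcode) { (void)ablk; (void)opcode; }

    static int ablk_request(struct ablk *ablk, int opcode, ablk_cbfn cbfn, void *cbarg)
    {
        if (!ablk->ioc_operational)
            return -1;             /* e.g. an IOC-failure status */
        if (ablk->busy)
            return -2;             /* e.g. a device-busy status */

        ablk->cbfn = cbfn;
        ablk->cbarg = cbarg;
        ablk->busy = true;
        ablk_msg_queue(ablk, opcode);
        return 0;
    }

    /* Mailbox completion path: clear busy, then hand the status to the caller. */
    static void ablk_complete(struct ablk *ablk, int status)
    {
        ablk_cbfn cbfn = ablk->cbfn;

        ablk->busy = false;
        ablk->cbfn = NULL;
        if (cbfn)
            cbfn(ablk->cbarg, status);
    }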
3639 bfa_trc(sfp, sfp->lock); in bfa_cb_sfp_show()
3640 if (sfp->cbfn) in bfa_cb_sfp_show()
3641 sfp->cbfn(sfp->cbarg, sfp->status); in bfa_cb_sfp_show()
3642 sfp->lock = 0; in bfa_cb_sfp_show()
3643 sfp->cbfn = NULL; in bfa_cb_sfp_show()
3649 bfa_trc(sfp, sfp->portspeed); in bfa_cb_sfp_state_query()
3650 if (sfp->media) { in bfa_cb_sfp_state_query()
3652 if (sfp->state_query_cbfn) in bfa_cb_sfp_state_query()
3653 sfp->state_query_cbfn(sfp->state_query_cbarg, in bfa_cb_sfp_state_query()
3654 sfp->status); in bfa_cb_sfp_state_query()
3655 sfp->media = NULL; in bfa_cb_sfp_state_query()
3658 if (sfp->portspeed) { in bfa_cb_sfp_state_query()
3659 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed); in bfa_cb_sfp_state_query()
3660 if (sfp->state_query_cbfn) in bfa_cb_sfp_state_query()
3661 sfp->state_query_cbfn(sfp->state_query_cbarg, in bfa_cb_sfp_state_query()
3662 sfp->status); in bfa_cb_sfp_state_query()
3663 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; in bfa_cb_sfp_state_query()
3666 sfp->state_query_lock = 0; in bfa_cb_sfp_state_query()
3667 sfp->state_query_cbfn = NULL; in bfa_cb_sfp_state_query()
3679 bfa_trc(sfp, sfp->lock); in bfa_sfp_notify()
3680 bfa_trc(sfp, sfp->state_query_lock); in bfa_sfp_notify()
3685 if (sfp->lock) { in bfa_sfp_notify()
3686 sfp->status = BFA_STATUS_IOC_FAILURE; in bfa_sfp_notify()
3690 if (sfp->state_query_lock) { in bfa_sfp_notify()
3691 sfp->status = BFA_STATUS_IOC_FAILURE; in bfa_sfp_notify()
3707 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad; in bfa_sfp_scn_aen_post()
3711 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) | in bfa_sfp_scn_aen_post()
3712 ((u64)rsp->event)); in bfa_sfp_scn_aen_post()
3718 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc); in bfa_sfp_scn_aen_post()
3719 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn; in bfa_sfp_scn_aen_post()
3720 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc); in bfa_sfp_scn_aen_post()
3722 switch (rsp->event) { in bfa_sfp_scn_aen_post()
3737 aen_entry->aen_data.port.level = rsp->pomlvl; in bfa_sfp_scn_aen_post()
3740 bfa_trc(sfp, rsp->event); in bfa_sfp_scn_aen_post()
3745 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq, in bfa_sfp_scn_aen_post()
3755 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; in bfa_sfp_getdata_send()
3757 bfa_trc(sfp, req->memtype); in bfa_sfp_getdata_send()
3760 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW, in bfa_sfp_getdata_send()
3761 bfa_ioc_portid(sfp->ioc)); in bfa_sfp_getdata_send()
3764 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd); in bfa_sfp_getdata_send()
3773 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; in bfa_sfp_getdata()
3775 WARN_ON(sfp->lock != 0); in bfa_sfp_getdata()
3776 bfa_trc(sfp, sfp->state); in bfa_sfp_getdata()
3778 sfp->lock = 1; in bfa_sfp_getdata()
3779 sfp->memtype = memtype; in bfa_sfp_getdata()
3780 req->memtype = memtype; in bfa_sfp_getdata()
3783 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa); in bfa_sfp_getdata()
3796 switch (rsp->event) { in bfa_sfp_scn()
3798 sfp->state = BFA_SFP_STATE_INSERTED; in bfa_sfp_scn()
3799 sfp->data_valid = 0; in bfa_sfp_scn()
3803 sfp->state = BFA_SFP_STATE_REMOVED; in bfa_sfp_scn()
3804 sfp->data_valid = 0; in bfa_sfp_scn()
3808 sfp->state = BFA_SFP_STATE_FAILED; in bfa_sfp_scn()
3809 sfp->data_valid = 0; in bfa_sfp_scn()
3813 sfp->state = BFA_SFP_STATE_UNSUPPORT; in bfa_sfp_scn()
3815 if (!sfp->lock) in bfa_sfp_scn()
3822 sfp->state = BFA_SFP_STATE_VALID; in bfa_sfp_scn()
3823 if (!sfp->lock) in bfa_sfp_scn()
3827 bfa_trc(sfp, rsp->event); in bfa_sfp_scn()
3840 if (!sfp->lock) { in bfa_sfp_show_comp()
3844 bfa_trc(sfp, sfp->lock); in bfa_sfp_show_comp()
3848 bfa_trc(sfp, rsp->status); in bfa_sfp_show_comp()
3849 if (rsp->status == BFA_STATUS_OK) { in bfa_sfp_show_comp()
3850 sfp->data_valid = 1; in bfa_sfp_show_comp()
3851 if (sfp->state == BFA_SFP_STATE_VALID) in bfa_sfp_show_comp()
3852 sfp->status = BFA_STATUS_OK; in bfa_sfp_show_comp()
3853 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT) in bfa_sfp_show_comp()
3854 sfp->status = BFA_STATUS_SFP_UNSUPP; in bfa_sfp_show_comp()
3856 bfa_trc(sfp, sfp->state); in bfa_sfp_show_comp()
3858 sfp->data_valid = 0; in bfa_sfp_show_comp()
3859 sfp->status = rsp->status; in bfa_sfp_show_comp()
3863 bfa_trc(sfp, sfp->memtype); in bfa_sfp_show_comp()
3864 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) { in bfa_sfp_show_comp()
3865 bfa_trc(sfp, sfp->data_valid); in bfa_sfp_show_comp()
3866 if (sfp->data_valid) { in bfa_sfp_show_comp()
3868 u8 *des = (u8 *)(sfp->sfpmem); in bfa_sfp_show_comp()
3869 memcpy(des, sfp->dbuf_kva, size); in bfa_sfp_show_comp()
3876 sfp->lock = 0; in bfa_sfp_show_comp()
3878 bfa_trc(sfp, sfp->state_query_lock); in bfa_sfp_show_comp()
3879 if (sfp->state_query_lock) { in bfa_sfp_show_comp()
3880 sfp->state = rsp->state; in bfa_sfp_show_comp()
3892 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; in bfa_sfp_state_query()
3895 WARN_ON(sfp->state != BFA_SFP_STATE_INIT); in bfa_sfp_state_query()
3896 WARN_ON(sfp->state_query_lock != 0); in bfa_sfp_state_query()
3897 bfa_trc(sfp, sfp->state); in bfa_sfp_state_query()
3899 sfp->state_query_lock = 1; in bfa_sfp_state_query()
3900 req->memtype = 0; in bfa_sfp_state_query()
3902 if (!sfp->lock) in bfa_sfp_state_query()
3909 enum bfa_defs_sfp_media_e *media = sfp->media; in bfa_sfp_media_get()
3913 if (sfp->state == BFA_SFP_STATE_UNSUPPORT) in bfa_sfp_media_get()
3915 else if (sfp->state == BFA_SFP_STATE_VALID) { in bfa_sfp_media_get()
3917 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; in bfa_sfp_media_get()
3918 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 | in bfa_sfp_media_get()
3919 (sfpmem->srlid_base.xcvr[5] >> 1); in bfa_sfp_media_get()
3921 e10g.b = sfpmem->srlid_base.xcvr[0]; in bfa_sfp_media_get()
3949 bfa_trc(sfp, sfp->state); in bfa_sfp_media_get()
3955 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; in bfa_sfp_speed_valid()
3956 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr; in bfa_sfp_speed_valid()
3957 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3; in bfa_sfp_speed_valid()
3958 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g; in bfa_sfp_speed_valid()
3990 switch (msg->mh.msg_id) { in bfa_sfp_intr()
4000 bfa_trc(sfp, msg->mh.msg_id); in bfa_sfp_intr()
4021 sfp->dev = dev; in bfa_sfp_attach()
4022 sfp->ioc = ioc; in bfa_sfp_attach()
4023 sfp->trcmod = trcmod; in bfa_sfp_attach()
4025 sfp->cbfn = NULL; in bfa_sfp_attach()
4026 sfp->cbarg = NULL; in bfa_sfp_attach()
4027 sfp->sfpmem = NULL; in bfa_sfp_attach()
4028 sfp->lock = 0; in bfa_sfp_attach()
4029 sfp->data_valid = 0; in bfa_sfp_attach()
4030 sfp->state = BFA_SFP_STATE_INIT; in bfa_sfp_attach()
4031 sfp->state_query_lock = 0; in bfa_sfp_attach()
4032 sfp->state_query_cbfn = NULL; in bfa_sfp_attach()
4033 sfp->state_query_cbarg = NULL; in bfa_sfp_attach()
4034 sfp->media = NULL; in bfa_sfp_attach()
4035 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; in bfa_sfp_attach()
4036 sfp->is_elb = BFA_FALSE; in bfa_sfp_attach()
4038 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp); in bfa_sfp_attach()
4039 bfa_q_qe_init(&sfp->ioc_notify); in bfa_sfp_attach()
4040 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp); in bfa_sfp_attach()
4041 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q); in bfa_sfp_attach()
4050 sfp->dbuf_kva = dm_kva; in bfa_sfp_memclaim()
4051 sfp->dbuf_pa = dm_pa; in bfa_sfp_memclaim()
4052 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s)); in bfa_sfp_memclaim()
4061 * @param[in] sfp - bfa sfp module
4063 * @param[out] sfpmem - sfp eeprom data
4071 if (!bfa_ioc_is_operational(sfp->ioc)) { in bfa_sfp_show()
4076 if (sfp->lock) { in bfa_sfp_show()
4081 sfp->cbfn = cbfn; in bfa_sfp_show()
4082 sfp->cbarg = cbarg; in bfa_sfp_show()
4083 sfp->sfpmem = sfpmem; in bfa_sfp_show()
4092 * @param[in] sfp - bfa sfp module
4094 * @param[out] media - sfp media type
4101 if (!bfa_ioc_is_operational(sfp->ioc)) { in bfa_sfp_media()
4106 sfp->media = media; in bfa_sfp_media()
4107 if (sfp->state == BFA_SFP_STATE_INIT) { in bfa_sfp_media()
4108 if (sfp->state_query_lock) { in bfa_sfp_media()
4112 sfp->state_query_cbfn = cbfn; in bfa_sfp_media()
4113 sfp->state_query_cbarg = cbarg; in bfa_sfp_media()
4126 * @param[in] sfp - bfa sfp module
4127 * @param[in] portspeed - port speed from user
4136 if (!bfa_ioc_is_operational(sfp->ioc)) in bfa_sfp_speed()
4140 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type)) in bfa_sfp_speed()
4144 sfp->portspeed = portspeed; in bfa_sfp_speed()
4145 if (sfp->state == BFA_SFP_STATE_INIT) { in bfa_sfp_speed()
4146 if (sfp->state_query_lock) { in bfa_sfp_speed()
4150 sfp->state_query_cbfn = cbfn; in bfa_sfp_speed()
4151 sfp->state_query_cbarg = cbarg; in bfa_sfp_speed()
4157 if (sfp->state == BFA_SFP_STATE_REMOVED || in bfa_sfp_speed()
4158 sfp->state == BFA_SFP_STATE_FAILED) { in bfa_sfp_speed()
4159 bfa_trc(sfp, sfp->state); in bfa_sfp_speed()
4163 if (sfp->state == BFA_SFP_STATE_INSERTED) { in bfa_sfp_speed()
4164 bfa_trc(sfp, sfp->state); in bfa_sfp_speed()
4169 if (sfp->is_elb) in bfa_sfp_speed()
4192 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_flash_aen_audit_post()
4199 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn; in bfa_flash_aen_audit_post()
4200 aen_entry->aen_data.audit.partition_inst = inst; in bfa_flash_aen_audit_post()
4201 aen_entry->aen_data.audit.partition_type = type; in bfa_flash_aen_audit_post()
4204 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, in bfa_flash_aen_audit_post()
4211 flash->op_busy = 0; in bfa_flash_cb()
4212 if (flash->cbfn) in bfa_flash_cb()
4213 flash->cbfn(flash->cbarg, flash->status); in bfa_flash_cb()
4225 if (flash->op_busy) { in bfa_flash_notify()
4226 flash->status = BFA_STATUS_IOC_FAILURE; in bfa_flash_notify()
4227 flash->cbfn(flash->cbarg, flash->status); in bfa_flash_notify()
4228 flash->op_busy = 0; in bfa_flash_notify()
4240 * @param[in] cbarg - callback argument
4247 (struct bfi_flash_query_req_s *) flash->mb.msg; in bfa_flash_query_send()
4249 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, in bfa_flash_query_send()
4250 bfa_ioc_portid(flash->ioc)); in bfa_flash_query_send()
4251 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s), in bfa_flash_query_send()
4252 flash->dbuf_pa); in bfa_flash_query_send()
4253 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_query_send()
4259 * @param[in] cbarg - callback argument
4265 (struct bfi_flash_write_req_s *) flash->mb.msg; in bfa_flash_write_send()
4268 msg->type = be32_to_cpu(flash->type); in bfa_flash_write_send()
4269 msg->instance = flash->instance; in bfa_flash_write_send()
4270 msg->offset = be32_to_cpu(flash->addr_off + flash->offset); in bfa_flash_write_send()
4271 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? in bfa_flash_write_send()
4272 flash->residue : BFA_FLASH_DMA_BUF_SZ; in bfa_flash_write_send()
4273 msg->length = be32_to_cpu(len); in bfa_flash_write_send()
4276 msg->last = (len == flash->residue) ? 1 : 0; in bfa_flash_write_send()
4278 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, in bfa_flash_write_send()
4279 bfa_ioc_portid(flash->ioc)); in bfa_flash_write_send()
4280 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); in bfa_flash_write_send()
4281 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); in bfa_flash_write_send()
4282 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_write_send()
4284 flash->residue -= len; in bfa_flash_write_send()
4285 flash->offset += len; in bfa_flash_write_send()
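bfa_flash_write_send() (lines 4265-4285) streams an image through a fixed-size DMA bounce buffer: each mailbox message carries min(residue, BFA_FLASH_DMA_BUF_SZ) bytes, marks the final chunk with a last flag, copies that slice into the DMA buffer, and advances offset/residue; the write completion in bfa_flash_intr() simply sends the next chunk until residue reaches zero. A short sketch of the chunking arithmetic; the buffer size and message helper are invented:

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    #define DMA_BUF_SZ 0x2000u                     /* stand-in for BFA_FLASH_DMA_BUF_SZ */

    struct flash_op {
        const uint8_t *ubuf;                       /* caller's full image */
        uint32_t offset;                           /* bytes already sent */
        uint32_t residue;                          /* bytes still to send */
        uint8_t dbuf[DMA_BUF_SZ];                  /* DMA bounce buffer */
    };

    /* Placeholder for building and queueing one write message to firmware. */
    static void flash_msg_queue(struct flash_op *op, uint32_t len, bool last)
    {
        (void)op; (void)len; (void)last;
    }

    /* Send one chunk; the completion handler calls this again while residue > 0. */
    static void flash_write_send(struct flash_op *op)
    {
        uint32_t len = op->residue < DMA_BUF_SZ ? op->residue : DMA_BUF_SZ;
        bool last = (len == op->residue);

        memcpy(op->dbuf, op->ubuf + op->offset, len);
        flash_msg_queue(op, len, last);

        op->residue -= len;
        op->offset  += len;
    }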
4291 * @param[in] cbarg - callback argument
4298 (struct bfi_flash_read_req_s *) flash->mb.msg; in bfa_flash_read_send()
4301 msg->type = be32_to_cpu(flash->type); in bfa_flash_read_send()
4302 msg->instance = flash->instance; in bfa_flash_read_send()
4303 msg->offset = be32_to_cpu(flash->addr_off + flash->offset); in bfa_flash_read_send()
4304 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? in bfa_flash_read_send()
4305 flash->residue : BFA_FLASH_DMA_BUF_SZ; in bfa_flash_read_send()
4306 msg->length = be32_to_cpu(len); in bfa_flash_read_send()
4307 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, in bfa_flash_read_send()
4308 bfa_ioc_portid(flash->ioc)); in bfa_flash_read_send()
4309 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); in bfa_flash_read_send()
4310 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_read_send()
4316 * @param[in] cbarg - callback argument
4323 (struct bfi_flash_erase_req_s *) flash->mb.msg; in bfa_flash_erase_send()
4325 msg->type = be32_to_cpu(flash->type); in bfa_flash_erase_send()
4326 msg->instance = flash->instance; in bfa_flash_erase_send()
4327 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ, in bfa_flash_erase_send()
4328 bfa_ioc_portid(flash->ioc)); in bfa_flash_erase_send()
4329 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_erase_send()
4335 * @param[in] flasharg - flash structure
4336 * @param[in] msg - message structure
4351 } m; in bfa_flash_intr() local
4353 m.msg = msg; in bfa_flash_intr()
4354 bfa_trc(flash, msg->mh.msg_id); in bfa_flash_intr()
4356 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) { in bfa_flash_intr()
4362 switch (msg->mh.msg_id) { in bfa_flash_intr()
4364 status = be32_to_cpu(m.query->status); in bfa_flash_intr()
4370 attr = (struct bfa_flash_attr_s *) flash->ubuf; in bfa_flash_intr()
4371 f = (struct bfa_flash_attr_s *) flash->dbuf_kva; in bfa_flash_intr()
4372 attr->status = be32_to_cpu(f->status); in bfa_flash_intr()
4373 attr->npart = be32_to_cpu(f->npart); in bfa_flash_intr()
4374 bfa_trc(flash, attr->status); in bfa_flash_intr()
4375 bfa_trc(flash, attr->npart); in bfa_flash_intr()
4376 for (i = 0; i < attr->npart; i++) { in bfa_flash_intr()
4377 attr->part[i].part_type = in bfa_flash_intr()
4378 be32_to_cpu(f->part[i].part_type); in bfa_flash_intr()
4379 attr->part[i].part_instance = in bfa_flash_intr()
4380 be32_to_cpu(f->part[i].part_instance); in bfa_flash_intr()
4381 attr->part[i].part_off = in bfa_flash_intr()
4382 be32_to_cpu(f->part[i].part_off); in bfa_flash_intr()
4383 attr->part[i].part_size = in bfa_flash_intr()
4384 be32_to_cpu(f->part[i].part_size); in bfa_flash_intr()
4385 attr->part[i].part_len = in bfa_flash_intr()
4386 be32_to_cpu(f->part[i].part_len); in bfa_flash_intr()
4387 attr->part[i].part_status = in bfa_flash_intr()
4388 be32_to_cpu(f->part[i].part_status); in bfa_flash_intr()
4391 flash->status = status; in bfa_flash_intr()
4395 status = be32_to_cpu(m.erase->status); in bfa_flash_intr()
4397 flash->status = status; in bfa_flash_intr()
4401 status = be32_to_cpu(m.write->status); in bfa_flash_intr()
4403 if (status != BFA_STATUS_OK || flash->residue == 0) { in bfa_flash_intr()
4404 flash->status = status; in bfa_flash_intr()
4407 bfa_trc(flash, flash->offset); in bfa_flash_intr()
4412 status = be32_to_cpu(m.read->status); in bfa_flash_intr()
4415 flash->status = status; in bfa_flash_intr()
4418 u32 len = be32_to_cpu(m.read->length); in bfa_flash_intr()
4419 bfa_trc(flash, flash->offset); in bfa_flash_intr()
4421 memcpy(flash->ubuf + flash->offset, in bfa_flash_intr()
4422 flash->dbuf_kva, len); in bfa_flash_intr()
4423 flash->residue -= len; in bfa_flash_intr()
4424 flash->offset += len; in bfa_flash_intr()
4425 if (flash->residue == 0) { in bfa_flash_intr()
4426 flash->status = status; in bfa_flash_intr()
4435 status = be32_to_cpu(m.event->status); in bfa_flash_intr()
4438 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR); in bfa_flash_intr()
4441 param = be32_to_cpu(m.event->param); in bfa_flash_intr()
4443 bfa_ioc_aen_post(flash->ioc, in bfa_flash_intr()
4456 * @param[in] mincfg - minimal cfg variable
4470 * @param[in] flash - flash structure
4471 * @param[in] ioc - ioc structure
4472 * @param[in] dev - device structure
4473 * @param[in] trcmod - trace module
4474 * @param[in] logmod - log module
4480 flash->ioc = ioc; in bfa_flash_attach()
4481 flash->trcmod = trcmod; in bfa_flash_attach()
4482 flash->cbfn = NULL; in bfa_flash_attach()
4483 flash->cbarg = NULL; in bfa_flash_attach()
4484 flash->op_busy = 0; in bfa_flash_attach()
4486 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); in bfa_flash_attach()
4487 bfa_q_qe_init(&flash->ioc_notify); in bfa_flash_attach()
4488 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); in bfa_flash_attach()
4489 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); in bfa_flash_attach()
4493 flash->dbuf_kva = NULL; in bfa_flash_attach()
4494 flash->dbuf_pa = 0; in bfa_flash_attach()
4501 * @param[in] flash - flash structure
4502 * @param[in] dm_kva - pointer to virtual memory address
4503 * @param[in] dm_pa - physical memory address
4504 * @param[in] mincfg - minimal cfg variable
4513 flash->dbuf_kva = dm_kva; in bfa_flash_memclaim()
4514 flash->dbuf_pa = dm_pa; in bfa_flash_memclaim()
4515 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); in bfa_flash_memclaim()
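A minimal usage sketch, assuming bfa_flash_meminfo() returns the number of DMA bytes the flash module needs and that the caller has already allocated a DMA-coherent buffer; the wrapper name and variables below are hypothetical, only the bfa_flash_* calls come from this file.

/* Hypothetical helper: hand a pre-allocated DMA area to the flash module. */
static void drv_flash_claim_dma(struct bfa_flash_s *flash, bfa_boolean_t mincfg,
				u8 *dma_kva, u64 dma_pa)
{
	u32 need = bfa_flash_meminfo(mincfg);	/* bytes requested; assumed 0 for min-cfg */

	if (need)
		bfa_flash_memclaim(flash, dma_kva, dma_pa, mincfg);
}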
4523 * @param[in] flash - flash structure
4524 * @param[in] attr - flash attribute structure
4525 * @param[in] cbfn - callback function
4526 * @param[in] cbarg - callback argument
4536 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_get_attr()
4539 if (flash->op_busy) { in bfa_flash_get_attr()
4540 bfa_trc(flash, flash->op_busy); in bfa_flash_get_attr()
4544 flash->op_busy = 1; in bfa_flash_get_attr()
4545 flash->cbfn = cbfn; in bfa_flash_get_attr()
4546 flash->cbarg = cbarg; in bfa_flash_get_attr()
4547 flash->ubuf = (u8 *) attr; in bfa_flash_get_attr()
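A hedged caller sketch: bfa_flash_get_attr() only queues the QUERY mailbox request and returns; the attribute structure is filled in and the callback invoked later from bfa_flash_intr(). The callback signature (void *cbarg, bfa_status_t status) is inferred from the cbfn/cbarg fields above, and all drv_* names are made up.

static void drv_flash_attr_done(void *cbarg, bfa_status_t status)
{
	struct bfa_flash_attr_s *attr = cbarg;

	if (status == BFA_STATUS_OK)
		pr_info("flash reports %u partitions\n", attr->npart);
}

static bfa_status_t drv_flash_query(struct bfa_flash_s *flash,
				    struct bfa_flash_attr_s *attr)
{
	/* asynchronous; completion runs from the FLASH mailbox interrupt */
	return bfa_flash_get_attr(flash, attr, drv_flash_attr_done, attr);
}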
4556 * @param[in] flash - flash structure
4557 * @param[in] type - flash partition type
4558 * @param[in] instance - flash partition instance
4559 * @param[in] cbfn - callback function
4560 * @param[in] cbarg - callback argument
4572 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_erase_part()
4575 if (flash->op_busy) { in bfa_flash_erase_part()
4576 bfa_trc(flash, flash->op_busy); in bfa_flash_erase_part()
4580 flash->op_busy = 1; in bfa_flash_erase_part()
4581 flash->cbfn = cbfn; in bfa_flash_erase_part()
4582 flash->cbarg = cbarg; in bfa_flash_erase_part()
4583 flash->type = type; in bfa_flash_erase_part()
4584 flash->instance = instance; in bfa_flash_erase_part()
4587 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE, in bfa_flash_erase_part()
4595 * @param[in] flash - flash structure
4596 * @param[in] type - flash partition type
4597 * @param[in] instance - flash partition instance
4598 * @param[in] buf - update data buffer
4599 * @param[in] len - data buffer length
4600 * @param[in] offset - offset relative to the partition starting address
4601 * @param[in] cbfn - callback function
4602 * @param[in] cbarg - callback argument
4617 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_update_part()
4621 * 'len' must be in word (4-byte) boundary in bfa_flash_update_part()
4630 if (flash->op_busy) { in bfa_flash_update_part()
4631 bfa_trc(flash, flash->op_busy); in bfa_flash_update_part()
4635 flash->op_busy = 1; in bfa_flash_update_part()
4636 flash->cbfn = cbfn; in bfa_flash_update_part()
4637 flash->cbarg = cbarg; in bfa_flash_update_part()
4638 flash->type = type; in bfa_flash_update_part()
4639 flash->instance = instance; in bfa_flash_update_part()
4640 flash->residue = len; in bfa_flash_update_part()
4641 flash->offset = 0; in bfa_flash_update_part()
4642 flash->addr_off = offset; in bfa_flash_update_part()
4643 flash->ubuf = buf; in bfa_flash_update_part()
4652 * @param[in] flash - flash structure
4653 * @param[in] type - flash partition type
4654 * @param[in] instance - flash partition instance
4655 * @param[in] buf - read data buffer
4656 * @param[in] len - data buffer length
4657 * @param[in] offset - offset relative to the partition starting address
4658 * @param[in] cbfn - callback function
4659 * @param[in] cbarg - callback argument
4674 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_read_part()
4678 * 'len' must be in word (4-byte) boundary in bfa_flash_read_part()
4684 if (flash->op_busy) { in bfa_flash_read_part()
4685 bfa_trc(flash, flash->op_busy); in bfa_flash_read_part()
4689 flash->op_busy = 1; in bfa_flash_read_part()
4690 flash->cbfn = cbfn; in bfa_flash_read_part()
4691 flash->cbarg = cbarg; in bfa_flash_read_part()
4692 flash->type = type; in bfa_flash_read_part()
4693 flash->instance = instance; in bfa_flash_read_part()
4694 flash->residue = len; in bfa_flash_read_part()
4695 flash->offset = 0; in bfa_flash_read_part()
4696 flash->addr_off = offset; in bfa_flash_read_part()
4697 flash->ubuf = buf; in bfa_flash_read_part()
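A hedged sketch of a partition read; per the checks above, 'len' must be a 4-byte multiple and only one flash operation may be outstanding. BFA_FLASH_PART_DRV is the same partition constant the dconf code later in this file uses; the drv_* names are illustrative.

static void drv_part_read_done(void *cbarg, bfa_status_t status)
{
	pr_info("flash partition read done, status %d\n", status);
}

static bfa_status_t drv_read_drv_partition(struct bfa_flash_s *flash,
					   u8 instance, void *buf, u32 len)
{
	/* offset 0 = start of the partition; data is copied back in
	 * BFA_FLASH_DMA_BUF_SZ chunks from bfa_flash_intr() until
	 * flash->residue reaches zero */
	return bfa_flash_read_part(flash, BFA_FLASH_PART_DRV, instance,
				   buf, len, 0, drv_part_read_done, NULL);
}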
4717 bfa_trc(diag, diag->block); in bfa_diag_notify()
4718 bfa_trc(diag, diag->fwping.lock); in bfa_diag_notify()
4719 bfa_trc(diag, diag->tsensor.lock); in bfa_diag_notify()
4724 if (diag->fwping.lock) { in bfa_diag_notify()
4725 diag->fwping.status = BFA_STATUS_IOC_FAILURE; in bfa_diag_notify()
4726 diag->fwping.cbfn(diag->fwping.cbarg, in bfa_diag_notify()
4727 diag->fwping.status); in bfa_diag_notify()
4728 diag->fwping.lock = 0; in bfa_diag_notify()
4731 if (diag->tsensor.lock) { in bfa_diag_notify()
4732 diag->tsensor.status = BFA_STATUS_IOC_FAILURE; in bfa_diag_notify()
4733 diag->tsensor.cbfn(diag->tsensor.cbarg, in bfa_diag_notify()
4734 diag->tsensor.status); in bfa_diag_notify()
4735 diag->tsensor.lock = 0; in bfa_diag_notify()
4738 if (diag->block) { in bfa_diag_notify()
4739 if (diag->timer_active) { in bfa_diag_notify()
4740 bfa_timer_stop(&diag->timer); in bfa_diag_notify()
4741 diag->timer_active = 0; in bfa_diag_notify()
4744 diag->status = BFA_STATUS_IOC_FAILURE; in bfa_diag_notify()
4745 diag->cbfn(diag->cbarg, diag->status); in bfa_diag_notify()
4746 diag->block = 0; in bfa_diag_notify()
4759 struct bfa_ioc_s *ioc = diag->ioc; in bfa_diag_memtest_done()
4760 struct bfa_diag_memtest_result *res = diag->result; in bfa_diag_memtest_done()
4764 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_diag_memtest_done()
4765 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_diag_memtest_done()
4771 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); in bfa_diag_memtest_done()
4778 res->status = swab32(res->status); in bfa_diag_memtest_done()
4779 bfa_trc(diag, res->status); in bfa_diag_memtest_done()
4781 if (res->status == BFI_BOOT_MEMTEST_RES_SIG) in bfa_diag_memtest_done()
4782 diag->status = BFA_STATUS_OK; in bfa_diag_memtest_done()
4784 diag->status = BFA_STATUS_MEMTEST_FAILED; in bfa_diag_memtest_done()
4785 res->addr = swab32(res->addr); in bfa_diag_memtest_done()
4786 res->exp = swab32(res->exp); in bfa_diag_memtest_done()
4787 res->act = swab32(res->act); in bfa_diag_memtest_done()
4788 res->err_status = swab32(res->err_status); in bfa_diag_memtest_done()
4789 res->err_status1 = swab32(res->err_status1); in bfa_diag_memtest_done()
4790 res->err_addr = swab32(res->err_addr); in bfa_diag_memtest_done()
4791 bfa_trc(diag, res->addr); in bfa_diag_memtest_done()
4792 bfa_trc(diag, res->exp); in bfa_diag_memtest_done()
4793 bfa_trc(diag, res->act); in bfa_diag_memtest_done()
4794 bfa_trc(diag, res->err_status); in bfa_diag_memtest_done()
4795 bfa_trc(diag, res->err_status1); in bfa_diag_memtest_done()
4796 bfa_trc(diag, res->err_addr); in bfa_diag_memtest_done()
4798 diag->timer_active = 0; in bfa_diag_memtest_done()
4799 diag->cbfn(diag->cbarg, diag->status); in bfa_diag_memtest_done()
4800 diag->block = 0; in bfa_diag_memtest_done()
4816 bfa_trc(diag, diag->fwping.dbuf_pa); in diag_fwping_send()
4820 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data; in diag_fwping_send()
4823 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg; in diag_fwping_send()
4826 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ, in diag_fwping_send()
4827 diag->fwping.dbuf_pa); in diag_fwping_send()
4829 fwping_req->count = cpu_to_be32(diag->fwping.count); in diag_fwping_send()
4831 fwping_req->data = diag->fwping.data; in diag_fwping_send()
4834 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING, in diag_fwping_send()
4835 bfa_ioc_portid(diag->ioc)); in diag_fwping_send()
4838 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd); in diag_fwping_send()
4845 u32 rsp_data = diag_rsp->data; in diag_fwping_comp()
4846 u8 rsp_dma_status = diag_rsp->dma_status; in diag_fwping_comp()
4853 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) : in diag_fwping_comp()
4854 diag->fwping.data; in diag_fwping_comp()
4856 if (diag->fwping.data != rsp_data) { in diag_fwping_comp()
4858 diag->fwping.result->dmastatus = in diag_fwping_comp()
4860 diag->fwping.status = BFA_STATUS_DATACORRUPTED; in diag_fwping_comp()
4861 diag->fwping.cbfn(diag->fwping.cbarg, in diag_fwping_comp()
4862 diag->fwping.status); in diag_fwping_comp()
4863 diag->fwping.lock = 0; in diag_fwping_comp()
4868 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) { in diag_fwping_comp()
4872 *((u32 *)diag->fwping.dbuf_kva + i)); in diag_fwping_comp()
4873 diag->fwping.result->dmastatus = in diag_fwping_comp()
4875 diag->fwping.status = BFA_STATUS_DATACORRUPTED; in diag_fwping_comp()
4876 diag->fwping.cbfn(diag->fwping.cbarg, in diag_fwping_comp()
4877 diag->fwping.status); in diag_fwping_comp()
4878 diag->fwping.lock = 0; in diag_fwping_comp()
4882 diag->fwping.result->dmastatus = BFA_STATUS_OK; in diag_fwping_comp()
4883 diag->fwping.status = BFA_STATUS_OK; in diag_fwping_comp()
4884 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); in diag_fwping_comp()
4885 diag->fwping.lock = 0; in diag_fwping_comp()
4887 diag->fwping.status = BFA_STATUS_HDMA_FAILED; in diag_fwping_comp()
4888 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); in diag_fwping_comp()
4889 diag->fwping.lock = 0; in diag_fwping_comp()
4902 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg; in diag_tempsensor_send()
4903 bfa_trc(diag, msg->temp); in diag_tempsensor_send()
4905 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR, in diag_tempsensor_send()
4906 bfa_ioc_portid(diag->ioc)); in diag_tempsensor_send()
4908 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd); in diag_tempsensor_send()
4914 if (!diag->tsensor.lock) { in diag_tempsensor_comp()
4916 bfa_trc(diag, diag->tsensor.lock); in diag_tempsensor_comp()
4924 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); in diag_tempsensor_comp()
4925 diag->tsensor.temp->ts_junc = rsp->ts_junc; in diag_tempsensor_comp()
4926 diag->tsensor.temp->ts_brd = rsp->ts_brd; in diag_tempsensor_comp()
4928 if (rsp->ts_brd) { in diag_tempsensor_comp()
4929 /* tsensor.temp->status is brd_temp status */ in diag_tempsensor_comp()
4930 diag->tsensor.temp->status = rsp->status; in diag_tempsensor_comp()
4931 if (rsp->status == BFA_STATUS_OK) { in diag_tempsensor_comp()
4932 diag->tsensor.temp->brd_temp = in diag_tempsensor_comp()
4933 be16_to_cpu(rsp->brd_temp); in diag_tempsensor_comp()
4935 diag->tsensor.temp->brd_temp = 0; in diag_tempsensor_comp()
4938 bfa_trc(diag, rsp->status); in diag_tempsensor_comp()
4939 bfa_trc(diag, rsp->ts_junc); in diag_tempsensor_comp()
4940 bfa_trc(diag, rsp->temp); in diag_tempsensor_comp()
4941 bfa_trc(diag, rsp->ts_brd); in diag_tempsensor_comp()
4942 bfa_trc(diag, rsp->brd_temp); in diag_tempsensor_comp()
4945 diag->tsensor.status = BFA_STATUS_OK; in diag_tempsensor_comp()
4946 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); in diag_tempsensor_comp()
4947 diag->tsensor.lock = 0; in diag_tempsensor_comp()
4958 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg; in diag_ledtest_send()
4960 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST, in diag_ledtest_send()
4961 bfa_ioc_portid(diag->ioc)); in diag_ledtest_send()
4967 if (ledtest->freq) in diag_ledtest_send()
4968 ledtest->freq = 500 / ledtest->freq; in diag_ledtest_send()
4970 if (ledtest->freq == 0) in diag_ledtest_send()
4971 ledtest->freq = 1; in diag_ledtest_send()
4973 bfa_trc(diag, ledtest->freq); in diag_ledtest_send()
4974 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */ in diag_ledtest_send()
4975 msg->cmd = (u8) ledtest->cmd; in diag_ledtest_send()
4976 msg->color = (u8) ledtest->color; in diag_ledtest_send()
4977 msg->portid = bfa_ioc_portid(diag->ioc); in diag_ledtest_send()
4978 msg->led = ledtest->led; in diag_ledtest_send()
4979 msg->freq = cpu_to_be16(ledtest->freq); in diag_ledtest_send()
4982 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd); in diag_ledtest_send()
4988 bfa_trc(diag, diag->ledtest.lock); in diag_ledtest_comp()
4989 diag->ledtest.lock = BFA_FALSE; in diag_ledtest_comp()
5001 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg; in diag_portbeacon_send()
5003 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON, in diag_portbeacon_send()
5004 bfa_ioc_portid(diag->ioc)); in diag_portbeacon_send()
5005 msg->beacon = beacon; in diag_portbeacon_send()
5006 msg->period = cpu_to_be32(sec); in diag_portbeacon_send()
5008 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd); in diag_portbeacon_send()
5014 bfa_trc(diag, diag->beacon.state); in diag_portbeacon_comp()
5015 diag->beacon.state = BFA_FALSE; in diag_portbeacon_comp()
5016 if (diag->cbfn_beacon) in diag_portbeacon_comp()
5017 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e); in diag_portbeacon_comp()
5028 switch (msg->mh.msg_id) { in bfa_diag_intr()
5042 bfa_trc(diag, msg->mh.msg_id); in bfa_diag_intr()
5050 * @param[in] *diag - diag data struct
5051 * @param[in] *memtest - mem test params input from upper layer,
5052 * @param[in] pattern - mem test pattern
5053 * @param[in] *result - mem test result
5054 * @param[in] cbfn - mem test callback function
5055 * @param[in] cbarg - callback function arg
5068 if (!bfa_ioc_adapter_is_disabled(diag->ioc)) in bfa_diag_memtest()
5072 if (diag->block) { in bfa_diag_memtest()
5073 bfa_trc(diag, diag->block); in bfa_diag_memtest()
5076 diag->block = 1; in bfa_diag_memtest()
5078 diag->result = result; in bfa_diag_memtest()
5079 diag->cbfn = cbfn; in bfa_diag_memtest()
5080 diag->cbarg = cbarg; in bfa_diag_memtest()
5083 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS); in bfa_diag_memtest()
5085 memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ? in bfa_diag_memtest()
5087 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer, in bfa_diag_memtest()
5089 diag->timer_active = 1; in bfa_diag_memtest()
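A hedged sketch of starting the memory test; it is only legal while the adapter is disabled (the bfa_ioc_adapter_is_disabled() check above), since the firmware is rebooted in BFI_FWBOOT_TYPE_MEMTEST mode and results are harvested later by bfa_diag_memtest_done(). The parameter struct type and the drv_* names are assumptions, not taken from this file.

static void drv_memtest_done(void *cbarg, bfa_status_t status)
{
	pr_info("memtest finished, status %d\n", status);
}

static bfa_status_t drv_run_memtest(struct bfa_diag_s *diag,
				    struct bfa_diag_memtest_s *params,
				    struct bfa_diag_memtest_result *result)
{
	/* pattern value is arbitrary here; a completion timer is armed inside */
	return bfa_diag_memtest(diag, params, 0xA5A5A5A5, result,
				drv_memtest_done, NULL);
}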
5096 * @param[in] *diag - diag data struct
5097 * @param[in] cnt - dma loop count for testing PCIE
5098 * @param[in] data - data pattern to pass in fw
5099 * @param[in] *result - pointer to bfa_diag_fwping_result_t data struct
5100 * @param[in] cbfn - callback function
5101 * @param[in] *cbarg - callback function arg
5113 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_fwping()
5116 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) && in bfa_diag_fwping()
5117 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH)) in bfa_diag_fwping()
5121 if (diag->block || diag->fwping.lock) { in bfa_diag_fwping()
5122 bfa_trc(diag, diag->block); in bfa_diag_fwping()
5123 bfa_trc(diag, diag->fwping.lock); in bfa_diag_fwping()
5128 diag->fwping.lock = 1; in bfa_diag_fwping()
5129 diag->fwping.cbfn = cbfn; in bfa_diag_fwping()
5130 diag->fwping.cbarg = cbarg; in bfa_diag_fwping()
5131 diag->fwping.result = result; in bfa_diag_fwping()
5132 diag->fwping.data = data; in bfa_diag_fwping()
5133 diag->fwping.count = cnt; in bfa_diag_fwping()
5136 diag->fwping.result->data = 0; in bfa_diag_fwping()
5137 diag->fwping.result->status = BFA_STATUS_OK; in bfa_diag_fwping()
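A hedged usage sketch of the firmware DMA ping: 'cnt' loops with a caller-chosen 32-bit pattern, verified word by word in diag_fwping_comp(). The result struct type name is an assumption based on the @param comment above; only one diag operation may be in flight (the block/lock checks shown).

static void drv_fwping_done(void *cbarg, bfa_status_t status)
{
	pr_info("fwping finished, status %d\n", status);
}

static bfa_status_t drv_run_fwping(struct bfa_diag_s *diag,
				   struct bfa_diag_results_fwping *result)
{
	/* 10 DMA loops; the firmware alternates the pattern each pass */
	return bfa_diag_fwping(diag, 10, 0x5A5A5A5A, result,
			       drv_fwping_done, NULL);
}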
5147 * @param[in] *diag - diag data struct
5148 * @param[in] *result - pointer to bfa_diag_temp_t data struct
5149 * @param[in] cbfn - callback function
5150 * @param[in] *cbarg - callback function arg
5160 if (diag->block || diag->tsensor.lock) { in bfa_diag_tsensor_query()
5161 bfa_trc(diag, diag->block); in bfa_diag_tsensor_query()
5162 bfa_trc(diag, diag->tsensor.lock); in bfa_diag_tsensor_query()
5166 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_tsensor_query()
5170 diag->tsensor.lock = 1; in bfa_diag_tsensor_query()
5171 diag->tsensor.temp = result; in bfa_diag_tsensor_query()
5172 diag->tsensor.cbfn = cbfn; in bfa_diag_tsensor_query()
5173 diag->tsensor.cbarg = cbarg; in bfa_diag_tsensor_query()
5174 diag->tsensor.status = BFA_STATUS_OK; in bfa_diag_tsensor_query()
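A hedged sketch of the temperature query; the result structure is filled from the BFI response in diag_tempsensor_comp(). The result type name is assumed from the driver's defs headers, and the drv_* names are illustrative.

static void drv_tsensor_done(void *cbarg, bfa_status_t status)
{
	struct bfa_diag_results_tempsensor_s *t = cbarg;

	if (status == BFA_STATUS_OK)
		pr_info("asic temperature: %d\n", t->temp);
}

static bfa_status_t drv_query_temp(struct bfa_diag_s *diag,
				   struct bfa_diag_results_tempsensor_s *t)
{
	return bfa_diag_tsensor_query(diag, t, drv_tsensor_done, t);
}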
5185 * @param[in] *diag - diag data struct
5186 * @param[in] *ledtest - pointer to ledtest data structure
5193 bfa_trc(diag, ledtest->cmd); in bfa_diag_ledtest()
5195 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_ledtest()
5198 if (diag->beacon.state) in bfa_diag_ledtest()
5201 if (diag->ledtest.lock) in bfa_diag_ledtest()
5205 diag->ledtest.lock = BFA_TRUE; in bfa_diag_ledtest()
5214 * @param[in] *diag - diag data struct
5215 * @param[in] beacon - port beaconing 1:ON 0:OFF
5216 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
5217 * @param[in] sec - beaconing duration in seconds
5229 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_beacon_port()
5232 if (diag->ledtest.lock) in bfa_diag_beacon_port()
5235 if (diag->beacon.state && beacon) /* beacon already on */ in bfa_diag_beacon_port()
5238 diag->beacon.state = beacon; in bfa_diag_beacon_port()
5239 diag->beacon.link_e2e = link_e2e_beacon; in bfa_diag_beacon_port()
5240 if (diag->cbfn_beacon) in bfa_diag_beacon_port()
5241 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon); in bfa_diag_beacon_port()
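A hedged sketch of toggling the port beacon; beaconing and the LED test are mutually exclusive (the ledtest.lock check above), and enabling the beacon while it is already on is a no-op. Wrapper names are made up.

static bfa_status_t drv_beacon_on(struct bfa_diag_s *diag, u32 secs)
{
	/* port beacon on, end-to-end link beacon off, for 'secs' seconds */
	return bfa_diag_beacon_port(diag, BFA_TRUE, BFA_FALSE, secs);
}

static bfa_status_t drv_beacon_off(struct bfa_diag_s *diag)
{
	return bfa_diag_beacon_port(diag, BFA_FALSE, BFA_FALSE, 0);
}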
5265 diag->dev = dev; in bfa_diag_attach()
5266 diag->ioc = ioc; in bfa_diag_attach()
5267 diag->trcmod = trcmod; in bfa_diag_attach()
5269 diag->block = 0; in bfa_diag_attach()
5270 diag->cbfn = NULL; in bfa_diag_attach()
5271 diag->cbarg = NULL; in bfa_diag_attach()
5272 diag->result = NULL; in bfa_diag_attach()
5273 diag->cbfn_beacon = cbfn_beacon; in bfa_diag_attach()
5275 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag); in bfa_diag_attach()
5276 bfa_q_qe_init(&diag->ioc_notify); in bfa_diag_attach()
5277 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag); in bfa_diag_attach()
5278 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q); in bfa_diag_attach()
5284 diag->fwping.dbuf_kva = dm_kva; in bfa_diag_memclaim()
5285 diag->fwping.dbuf_pa = dm_pa; in bfa_diag_memclaim()
5286 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ); in bfa_diag_memclaim()
5290 * PHY module specific
5293 #define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
5298 int i, m = sz >> 2; in bfa_phy_ntoh32() local
5300 for (i = 0; i < m; i++) in bfa_phy_ntoh32()
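Only the loop header of the helper is matched above; a minimal sketch of what such a big-endian-to-host word converter typically does over sz bytes (sz >> 2 words), assuming it mirrors the be32_to_cpu() conversions used elsewhere in this file:

static void example_ntoh32(u32 *obuf, u32 *ibuf, int sz)
{
	int i, m = sz >> 2;		/* number of 32-bit words in sz bytes */

	for (i = 0; i < m; i++)
		obuf[i] = be32_to_cpu(ibuf[i]);
}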
5305 bfa_phy_present(struct bfa_phy_s *phy) in bfa_phy_present() argument
5307 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING); in bfa_phy_present()
5313 struct bfa_phy_s *phy = cbarg; in bfa_phy_notify() local
5315 bfa_trc(phy, event); in bfa_phy_notify()
5320 if (phy->op_busy) { in bfa_phy_notify()
5321 phy->status = BFA_STATUS_IOC_FAILURE; in bfa_phy_notify()
5322 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_notify()
5323 phy->op_busy = 0; in bfa_phy_notify()
5333 * Send phy attribute query request.
5335 * @param[in] cbarg - callback argument
5340 struct bfa_phy_s *phy = cbarg; in bfa_phy_query_send() local
5342 (struct bfi_phy_query_req_s *) phy->mb.msg; in bfa_phy_query_send()
5344 msg->instance = phy->instance; in bfa_phy_query_send()
5345 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ, in bfa_phy_query_send()
5346 bfa_ioc_portid(phy->ioc)); in bfa_phy_query_send()
5347 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa); in bfa_phy_query_send()
5348 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_query_send()
5352 * Send phy write request.
5354 * @param[in] cbarg - callback argument
5359 struct bfa_phy_s *phy = cbarg; in bfa_phy_write_send() local
5361 (struct bfi_phy_write_req_s *) phy->mb.msg; in bfa_phy_write_send()
5366 msg->instance = phy->instance; in bfa_phy_write_send()
5367 msg->offset = cpu_to_be32(phy->addr_off + phy->offset); in bfa_phy_write_send()
5368 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? in bfa_phy_write_send()
5369 phy->residue : BFA_PHY_DMA_BUF_SZ; in bfa_phy_write_send()
5370 msg->length = cpu_to_be32(len); in bfa_phy_write_send()
5373 msg->last = (len == phy->residue) ? 1 : 0; in bfa_phy_write_send()
5375 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ, in bfa_phy_write_send()
5376 bfa_ioc_portid(phy->ioc)); in bfa_phy_write_send()
5377 bfa_alen_set(&msg->alen, len, phy->dbuf_pa); in bfa_phy_write_send()
5379 buf = (u16 *) (phy->ubuf + phy->offset); in bfa_phy_write_send()
5380 dbuf = (u16 *)phy->dbuf_kva; in bfa_phy_write_send()
5385 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_write_send()
5387 phy->residue -= len; in bfa_phy_write_send()
5388 phy->offset += len; in bfa_phy_write_send()
5392 * Send phy read request.
5394 * @param[in] cbarg - callback argument
5399 struct bfa_phy_s *phy = cbarg; in bfa_phy_read_send() local
5401 (struct bfi_phy_read_req_s *) phy->mb.msg; in bfa_phy_read_send()
5404 msg->instance = phy->instance; in bfa_phy_read_send()
5405 msg->offset = cpu_to_be32(phy->addr_off + phy->offset); in bfa_phy_read_send()
5406 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? in bfa_phy_read_send()
5407 phy->residue : BFA_PHY_DMA_BUF_SZ; in bfa_phy_read_send()
5408 msg->length = cpu_to_be32(len); in bfa_phy_read_send()
5409 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ, in bfa_phy_read_send()
5410 bfa_ioc_portid(phy->ioc)); in bfa_phy_read_send()
5411 bfa_alen_set(&msg->alen, len, phy->dbuf_pa); in bfa_phy_read_send()
5412 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_read_send()
5416 * Send phy stats request.
5418 * @param[in] cbarg - callback argument
5423 struct bfa_phy_s *phy = cbarg; in bfa_phy_stats_send() local
5425 (struct bfi_phy_stats_req_s *) phy->mb.msg; in bfa_phy_stats_send()
5427 msg->instance = phy->instance; in bfa_phy_stats_send()
5428 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ, in bfa_phy_stats_send()
5429 bfa_ioc_portid(phy->ioc)); in bfa_phy_stats_send()
5430 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa); in bfa_phy_stats_send()
5431 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_stats_send()
5437 * @param[in] mincfg - minimal cfg variable
5442 /* min driver doesn't need phy */ in bfa_phy_meminfo()
5452 * @param[in] phy - phy structure
5453 * @param[in] ioc - ioc structure
5454 * @param[in] dev - device structure
5455 * @param[in] trcmod - trace module
5456 * @param[in] logmod - log module
5459 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev, in bfa_phy_attach() argument
5462 phy->ioc = ioc; in bfa_phy_attach()
5463 phy->trcmod = trcmod; in bfa_phy_attach()
5464 phy->cbfn = NULL; in bfa_phy_attach()
5465 phy->cbarg = NULL; in bfa_phy_attach()
5466 phy->op_busy = 0; in bfa_phy_attach()
5468 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy); in bfa_phy_attach()
5469 bfa_q_qe_init(&phy->ioc_notify); in bfa_phy_attach()
5470 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy); in bfa_phy_attach()
5471 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q); in bfa_phy_attach()
5473 /* min driver doesn't need phy */ in bfa_phy_attach()
5475 phy->dbuf_kva = NULL; in bfa_phy_attach()
5476 phy->dbuf_pa = 0; in bfa_phy_attach()
5481 * Claim memory for phy
5483 * @param[in] phy - phy structure
5484 * @param[in] dm_kva - pointer to virtual memory address
5485 * @param[in] dm_pa - physical memory address
5486 * @param[in] mincfg - minimal cfg variable
5489 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa, in bfa_phy_memclaim() argument
5495 phy->dbuf_kva = dm_kva; in bfa_phy_memclaim()
5496 phy->dbuf_pa = dm_pa; in bfa_phy_memclaim()
5497 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ); in bfa_phy_memclaim()
5512 * Get phy attribute.
5514 * @param[in] phy - phy structure
5515 * @param[in] attr - phy attribute structure
5516 * @param[in] cbfn - callback function
5517 * @param[in] cbarg - callback argument
5522 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance, in bfa_phy_get_attr() argument
5525 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ); in bfa_phy_get_attr()
5526 bfa_trc(phy, instance); in bfa_phy_get_attr()
5528 if (!bfa_phy_present(phy)) in bfa_phy_get_attr()
5531 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_get_attr()
5534 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_get_attr()
5535 bfa_trc(phy, phy->op_busy); in bfa_phy_get_attr()
5539 phy->op_busy = 1; in bfa_phy_get_attr()
5540 phy->cbfn = cbfn; in bfa_phy_get_attr()
5541 phy->cbarg = cbarg; in bfa_phy_get_attr()
5542 phy->instance = instance; in bfa_phy_get_attr()
5543 phy->ubuf = (uint8_t *) attr; in bfa_phy_get_attr()
5544 bfa_phy_query_send(phy); in bfa_phy_get_attr()
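A hedged caller sketch; phy operations additionally require bfa_phy_present() (Lightning card type) and a free phy semaphore (bfa_phy_busy()), per the checks above. Names other than bfa_phy_get_attr() are illustrative.

static void drv_phy_attr_done(void *cbarg, bfa_status_t status)
{
	pr_info("phy attribute query done, status %d\n", status);
}

static bfa_status_t drv_query_phy(struct bfa_phy_s *phy, u8 instance,
				  struct bfa_phy_attr_s *attr)
{
	/* fails early if the phy is absent, the IOC is down, or busy */
	return bfa_phy_get_attr(phy, instance, attr, drv_phy_attr_done, NULL);
}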
5550 * Get phy stats.
5552 * @param[in] phy - phy structure
5553 * @param[in] instance - phy image instance
5554 * @param[in] stats - pointer to phy stats
5555 * @param[in] cbfn - callback function
5556 * @param[in] cbarg - callback argument
5561 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance, in bfa_phy_get_stats() argument
5565 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ); in bfa_phy_get_stats()
5566 bfa_trc(phy, instance); in bfa_phy_get_stats()
5568 if (!bfa_phy_present(phy)) in bfa_phy_get_stats()
5571 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_get_stats()
5574 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_get_stats()
5575 bfa_trc(phy, phy->op_busy); in bfa_phy_get_stats()
5579 phy->op_busy = 1; in bfa_phy_get_stats()
5580 phy->cbfn = cbfn; in bfa_phy_get_stats()
5581 phy->cbarg = cbarg; in bfa_phy_get_stats()
5582 phy->instance = instance; in bfa_phy_get_stats()
5583 phy->ubuf = (u8 *) stats; in bfa_phy_get_stats()
5584 bfa_phy_stats_send(phy); in bfa_phy_get_stats()
5590 * Update phy image.
5592 * @param[in] phy - phy structure
5593 * @param[in] instance - phy image instance
5594 * @param[in] buf - update data buffer
5595 * @param[in] len - data buffer length
5596 * @param[in] offset - offset relative to starting address
5597 * @param[in] cbfn - callback function
5598 * @param[in] cbarg - callback argument
5603 bfa_phy_update(struct bfa_phy_s *phy, u8 instance, in bfa_phy_update() argument
5607 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ); in bfa_phy_update()
5608 bfa_trc(phy, instance); in bfa_phy_update()
5609 bfa_trc(phy, len); in bfa_phy_update()
5610 bfa_trc(phy, offset); in bfa_phy_update()
5612 if (!bfa_phy_present(phy)) in bfa_phy_update()
5615 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_update()
5618 /* 'len' must be in word (4-byte) boundary */ in bfa_phy_update()
5622 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_update()
5623 bfa_trc(phy, phy->op_busy); in bfa_phy_update()
5627 phy->op_busy = 1; in bfa_phy_update()
5628 phy->cbfn = cbfn; in bfa_phy_update()
5629 phy->cbarg = cbarg; in bfa_phy_update()
5630 phy->instance = instance; in bfa_phy_update()
5631 phy->residue = len; in bfa_phy_update()
5632 phy->offset = 0; in bfa_phy_update()
5633 phy->addr_off = offset; in bfa_phy_update()
5634 phy->ubuf = buf; in bfa_phy_update()
5636 bfa_phy_write_send(phy); in bfa_phy_update()
5641 * Read phy image.
5643 * @param[in] phy - phy structure
5644 * @param[in] instance - phy image instance
5645 * @param[in] buf - read data buffer
5646 * @param[in] len - data buffer length
5647 * @param[in] offset - offset relative to starting address
5648 * @param[in] cbfn - callback function
5649 * @param[in] cbarg - callback argument
5654 bfa_phy_read(struct bfa_phy_s *phy, u8 instance, in bfa_phy_read() argument
5658 bfa_trc(phy, BFI_PHY_H2I_READ_REQ); in bfa_phy_read()
5659 bfa_trc(phy, instance); in bfa_phy_read()
5660 bfa_trc(phy, len); in bfa_phy_read()
5661 bfa_trc(phy, offset); in bfa_phy_read()
5663 if (!bfa_phy_present(phy)) in bfa_phy_read()
5666 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_read()
5669 /* 'len' must be in word (4-byte) boundary */ in bfa_phy_read()
5673 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_read()
5674 bfa_trc(phy, phy->op_busy); in bfa_phy_read()
5678 phy->op_busy = 1; in bfa_phy_read()
5679 phy->cbfn = cbfn; in bfa_phy_read()
5680 phy->cbarg = cbarg; in bfa_phy_read()
5681 phy->instance = instance; in bfa_phy_read()
5682 phy->residue = len; in bfa_phy_read()
5683 phy->offset = 0; in bfa_phy_read()
5684 phy->addr_off = offset; in bfa_phy_read()
5685 phy->ubuf = buf; in bfa_phy_read()
5686 bfa_phy_read_send(phy); in bfa_phy_read()
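A hedged sketch of reading the phy image; as with bfa_phy_update(), 'len' must be a 4-byte multiple and the transfer proceeds in BFA_PHY_DMA_BUF_SZ-sized chunks driven from bfa_phy_intr(). The drv_* names are made up.

static void drv_phy_read_done(void *cbarg, bfa_status_t status)
{
	pr_info("phy image read done, status %d\n", status);
}

static bfa_status_t drv_read_phy_image(struct bfa_phy_s *phy, u8 instance,
				       void *buf, u32 len)
{
	/* offset 0 = start of the phy image */
	return bfa_phy_read(phy, instance, buf, len, 0,
			    drv_phy_read_done, NULL);
}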
5692 * Process phy response messages upon receiving interrupts.
5694 * @param[in] phyarg - phy structure
5695 * @param[in] msg - message structure
5700 struct bfa_phy_s *phy = phyarg; in bfa_phy_intr() local
5709 } m; in bfa_phy_intr() local
5711 m.msg = msg; in bfa_phy_intr()
5712 bfa_trc(phy, msg->mh.msg_id); in bfa_phy_intr()
5714 if (!phy->op_busy) { in bfa_phy_intr()
5716 bfa_trc(phy, 0x9999); in bfa_phy_intr()
5720 switch (msg->mh.msg_id) { in bfa_phy_intr()
5722 status = be32_to_cpu(m.query->status); in bfa_phy_intr()
5723 bfa_trc(phy, status); in bfa_phy_intr()
5727 (struct bfa_phy_attr_s *) phy->ubuf; in bfa_phy_intr()
5728 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva, in bfa_phy_intr()
5730 bfa_trc(phy, attr->status); in bfa_phy_intr()
5731 bfa_trc(phy, attr->length); in bfa_phy_intr()
5734 phy->status = status; in bfa_phy_intr()
5735 phy->op_busy = 0; in bfa_phy_intr()
5736 if (phy->cbfn) in bfa_phy_intr()
5737 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5740 status = be32_to_cpu(m.stats->status); in bfa_phy_intr()
5741 bfa_trc(phy, status); in bfa_phy_intr()
5745 (struct bfa_phy_stats_s *) phy->ubuf; in bfa_phy_intr()
5746 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva, in bfa_phy_intr()
5748 bfa_trc(phy, stats->status); in bfa_phy_intr()
5751 phy->status = status; in bfa_phy_intr()
5752 phy->op_busy = 0; in bfa_phy_intr()
5753 if (phy->cbfn) in bfa_phy_intr()
5754 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5757 status = be32_to_cpu(m.write->status); in bfa_phy_intr()
5758 bfa_trc(phy, status); in bfa_phy_intr()
5760 if (status != BFA_STATUS_OK || phy->residue == 0) { in bfa_phy_intr()
5761 phy->status = status; in bfa_phy_intr()
5762 phy->op_busy = 0; in bfa_phy_intr()
5763 if (phy->cbfn) in bfa_phy_intr()
5764 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5766 bfa_trc(phy, phy->offset); in bfa_phy_intr()
5767 bfa_phy_write_send(phy); in bfa_phy_intr()
5771 status = be32_to_cpu(m.read->status); in bfa_phy_intr()
5772 bfa_trc(phy, status); in bfa_phy_intr()
5775 phy->status = status; in bfa_phy_intr()
5776 phy->op_busy = 0; in bfa_phy_intr()
5777 if (phy->cbfn) in bfa_phy_intr()
5778 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5780 u32 len = be32_to_cpu(m.read->length); in bfa_phy_intr()
5781 u16 *buf = (u16 *)(phy->ubuf + phy->offset); in bfa_phy_intr()
5782 u16 *dbuf = (u16 *)phy->dbuf_kva; in bfa_phy_intr()
5785 bfa_trc(phy, phy->offset); in bfa_phy_intr()
5786 bfa_trc(phy, len); in bfa_phy_intr()
5791 phy->residue -= len; in bfa_phy_intr()
5792 phy->offset += len; in bfa_phy_intr()
5794 if (phy->residue == 0) { in bfa_phy_intr()
5795 phy->status = status; in bfa_phy_intr()
5796 phy->op_busy = 0; in bfa_phy_intr()
5797 if (phy->cbfn) in bfa_phy_intr()
5798 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5800 bfa_phy_read_send(phy); in bfa_phy_intr()
5848 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_uninit()
5852 if (dconf->min_cfg) { in bfa_dconf_sm_uninit()
5853 bfa_trc(dconf->bfa, dconf->min_cfg); in bfa_dconf_sm_uninit()
5854 bfa_fsm_send_event(&dconf->bfa->iocfc, in bfa_dconf_sm_uninit()
5859 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_uninit()
5861 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), in bfa_dconf_sm_uninit()
5862 BFA_FLASH_PART_DRV, dconf->instance, in bfa_dconf_sm_uninit()
5863 dconf->dconf, in bfa_dconf_sm_uninit()
5865 bfa_dconf_init_cb, dconf->bfa); in bfa_dconf_sm_uninit()
5867 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_uninit()
5868 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED); in bfa_dconf_sm_uninit()
5874 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_sm_uninit()
5881 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_uninit()
5892 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_flash_read()
5896 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_flash_read()
5901 bfa_ioc_suspend(&dconf->bfa->ioc); in bfa_dconf_sm_flash_read()
5904 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_flash_read()
5906 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_sm_flash_read()
5909 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_flash_read()
5913 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_flash_read()
5923 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_ready()
5927 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_ready()
5933 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_sm_ready()
5939 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_ready()
5950 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_dirty()
5958 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_dirty()
5959 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_dirty()
5963 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_dirty()
5964 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_dirty()
5972 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_dirty()
5976 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_dirty()
5987 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_final_sync()
5992 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_final_sync()
5996 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_sm_final_sync()
5999 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_final_sync()
6006 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_sync()
6013 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_sync()
6018 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_sync()
6026 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_sync()
6034 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_iocdown_dirty()
6038 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_iocdown_dirty()
6044 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_sm_iocdown_dirty()
6049 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_iocdown_dirty()
6062 if (cfg->drvcfg.min_cfg) in bfa_dconf_meminfo()
6075 dconf->bfad = bfad; in bfa_dconf_attach()
6076 dconf->bfa = bfa; in bfa_dconf_attach()
6077 dconf->instance = bfa->ioc.port_id; in bfa_dconf_attach()
6078 bfa_trc(bfa, dconf->instance); in bfa_dconf_attach()
6080 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf); in bfa_dconf_attach()
6081 if (cfg->drvcfg.min_cfg) { in bfa_dconf_attach()
6083 dconf->min_cfg = BFA_TRUE; in bfa_dconf_attach()
6085 dconf->min_cfg = BFA_FALSE; in bfa_dconf_attach()
6101 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) in bfa_dconf_init_cb()
6102 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE; in bfa_dconf_init_cb()
6103 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) in bfa_dconf_init_cb()
6104 dconf->dconf->hdr.version = BFI_DCONF_VERSION; in bfa_dconf_init_cb()
6107 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); in bfa_dconf_init_cb()
6134 bfa_trc(dconf->bfa, 0); in bfa_dconf_flash_write()
6136 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa), in bfa_dconf_flash_write()
6137 BFA_FLASH_PART_DRV, dconf->instance, in bfa_dconf_flash_write()
6138 dconf->dconf, sizeof(struct bfa_dconf_s), 0, in bfa_dconf_flash_write()
6142 bfa_trc(dconf->bfa, bfa_status); in bfa_dconf_flash_write()
6151 bfa_trc(dconf->bfa, 0); in bfa_dconf_update()
6155 if (dconf->min_cfg) { in bfa_dconf_update()
6156 bfa_trc(dconf->bfa, dconf->min_cfg); in bfa_dconf_update()
6197 if (fru->op_busy) { in bfa_fru_notify()
6198 fru->status = BFA_STATUS_IOC_FAILURE; in bfa_fru_notify()
6199 fru->cbfn(fru->cbarg, fru->status); in bfa_fru_notify()
6200 fru->op_busy = 0; in bfa_fru_notify()
6212 * @param[in] cbarg - callback argument
6219 (struct bfi_fru_write_req_s *) fru->mb.msg; in bfa_fru_write_send()
6222 msg->offset = cpu_to_be32(fru->addr_off + fru->offset); in bfa_fru_write_send()
6223 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? in bfa_fru_write_send()
6224 fru->residue : BFA_FRU_DMA_BUF_SZ; in bfa_fru_write_send()
6225 msg->length = cpu_to_be32(len); in bfa_fru_write_send()
6230 msg->last = (len == fru->residue) ? 1 : 0; in bfa_fru_write_send()
6232 msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0; in bfa_fru_write_send()
6233 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); in bfa_fru_write_send()
6234 bfa_alen_set(&msg->alen, len, fru->dbuf_pa); in bfa_fru_write_send()
6236 memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len); in bfa_fru_write_send()
6237 bfa_ioc_mbox_queue(fru->ioc, &fru->mb); in bfa_fru_write_send()
6239 fru->residue -= len; in bfa_fru_write_send()
6240 fru->offset += len; in bfa_fru_write_send()
6246 * @param[in] cbarg - callback argument
6253 (struct bfi_fru_read_req_s *) fru->mb.msg; in bfa_fru_read_send()
6256 msg->offset = cpu_to_be32(fru->addr_off + fru->offset); in bfa_fru_read_send()
6257 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ? in bfa_fru_read_send()
6258 fru->residue : BFA_FRU_DMA_BUF_SZ; in bfa_fru_read_send()
6259 msg->length = cpu_to_be32(len); in bfa_fru_read_send()
6260 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); in bfa_fru_read_send()
6261 bfa_alen_set(&msg->alen, len, fru->dbuf_pa); in bfa_fru_read_send()
6262 bfa_ioc_mbox_queue(fru->ioc, &fru->mb); in bfa_fru_read_send()
6268 * @param[in] mincfg - minimal cfg variable
6283 * @param[in] fru - fru structure
6284 * @param[in] ioc - ioc structure
6285 * @param[in] dev - device structure
6286 * @param[in] trcmod - trace module
6287 * @param[in] logmod - log module
6293 fru->ioc = ioc; in bfa_fru_attach()
6294 fru->trcmod = trcmod; in bfa_fru_attach()
6295 fru->cbfn = NULL; in bfa_fru_attach()
6296 fru->cbarg = NULL; in bfa_fru_attach()
6297 fru->op_busy = 0; in bfa_fru_attach()
6299 bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru); in bfa_fru_attach()
6300 bfa_q_qe_init(&fru->ioc_notify); in bfa_fru_attach()
6301 bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru); in bfa_fru_attach()
6302 list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q); in bfa_fru_attach()
6306 fru->dbuf_kva = NULL; in bfa_fru_attach()
6307 fru->dbuf_pa = 0; in bfa_fru_attach()
6314 * @param[in] fru - fru structure
6315 * @param[in] dm_kva - pointer to virtual memory address
6316 * @param[in] dm_pa - physical memory address
6317 * @param[in] mincfg - minimal cfg variable
6326 fru->dbuf_kva = dm_kva; in bfa_fru_memclaim()
6327 fru->dbuf_pa = dm_pa; in bfa_fru_memclaim()
6328 memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ); in bfa_fru_memclaim()
6336 * @param[in] fru - fru structure
6337 * @param[in] buf - update data buffer
6338 * @param[in] len - data buffer length
6339 * @param[in] offset - offset relative to starting address
6340 * @param[in] cbfn - callback function
6341 * @param[in] cbarg - callback argument
6353 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 && in bfa_fruvpd_update()
6354 fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2) in bfa_fruvpd_update()
6357 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK) in bfa_fruvpd_update()
6360 if (!bfa_ioc_is_operational(fru->ioc)) in bfa_fruvpd_update()
6363 if (fru->op_busy) { in bfa_fruvpd_update()
6364 bfa_trc(fru, fru->op_busy); in bfa_fruvpd_update()
6368 fru->op_busy = 1; in bfa_fruvpd_update()
6370 fru->cbfn = cbfn; in bfa_fruvpd_update()
6371 fru->cbarg = cbarg; in bfa_fruvpd_update()
6372 fru->residue = len; in bfa_fruvpd_update()
6373 fru->offset = 0; in bfa_fruvpd_update()
6374 fru->addr_off = offset; in bfa_fruvpd_update()
6375 fru->ubuf = buf; in bfa_fruvpd_update()
6376 fru->trfr_cmpl = trfr_cmpl; in bfa_fruvpd_update()
6386 * @param[in] fru - fru structure
6387 * @param[in] buf - read data buffer
6388 * @param[in] len - data buffer length
6389 * @param[in] offset - offset relative to starting address
6390 * @param[in] cbfn - callback function
6391 * @param[in] cbarg - callback argument
6403 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) in bfa_fruvpd_read()
6406 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK && in bfa_fruvpd_read()
6407 fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2) in bfa_fruvpd_read()
6410 if (!bfa_ioc_is_operational(fru->ioc)) in bfa_fruvpd_read()
6413 if (fru->op_busy) { in bfa_fruvpd_read()
6414 bfa_trc(fru, fru->op_busy); in bfa_fruvpd_read()
6418 fru->op_busy = 1; in bfa_fruvpd_read()
6420 fru->cbfn = cbfn; in bfa_fruvpd_read()
6421 fru->cbarg = cbarg; in bfa_fruvpd_read()
6422 fru->residue = len; in bfa_fruvpd_read()
6423 fru->offset = 0; in bfa_fruvpd_read()
6424 fru->addr_off = offset; in bfa_fruvpd_read()
6425 fru->ubuf = buf; in bfa_fruvpd_read()
6434 * @param[in] fru - fru structure
6435 * @param[out] size - maximum size of fru vpd data
6442 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) in bfa_fruvpd_get_max_size()
6445 if (!bfa_ioc_is_operational(fru->ioc)) in bfa_fruvpd_get_max_size()
6448 if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK || in bfa_fruvpd_get_max_size()
6449 fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2) in bfa_fruvpd_get_max_size()
6458 * @param[in] fru - fru structure
6459 * @param[in] buf - update data buffer
6460 * @param[in] len - data buffer length
6461 * @param[in] offset - offset relative to starting address
6462 * @param[in] cbfn - callback function
6463 * @param[in] cbarg - callback argument
6476 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) in bfa_tfru_write()
6479 if (!bfa_ioc_is_operational(fru->ioc)) in bfa_tfru_write()
6482 if (fru->op_busy) { in bfa_tfru_write()
6483 bfa_trc(fru, fru->op_busy); in bfa_tfru_write()
6487 fru->op_busy = 1; in bfa_tfru_write()
6489 fru->cbfn = cbfn; in bfa_tfru_write()
6490 fru->cbarg = cbarg; in bfa_tfru_write()
6491 fru->residue = len; in bfa_tfru_write()
6492 fru->offset = 0; in bfa_tfru_write()
6493 fru->addr_off = offset; in bfa_tfru_write()
6494 fru->ubuf = buf; in bfa_tfru_write()
6504 * @param[in] fru - fru structure
6505 * @param[in] buf - read data buffer
6506 * @param[in] len - data buffer length
6507 * @param[in] offset - offset relative to starting address
6508 * @param[in] cbfn - callback function
6509 * @param[in] cbarg - callback argument
6521 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) in bfa_tfru_read()
6524 if (!bfa_ioc_is_operational(fru->ioc)) in bfa_tfru_read()
6527 if (fru->op_busy) { in bfa_tfru_read()
6528 bfa_trc(fru, fru->op_busy); in bfa_tfru_read()
6532 fru->op_busy = 1; in bfa_tfru_read()
6534 fru->cbfn = cbfn; in bfa_tfru_read()
6535 fru->cbarg = cbarg; in bfa_tfru_read()
6536 fru->residue = len; in bfa_tfru_read()
6537 fru->offset = 0; in bfa_tfru_read()
6538 fru->addr_off = offset; in bfa_tfru_read()
6539 fru->ubuf = buf; in bfa_tfru_read()
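A hedged sketch covering the two FRU read paths shown above: bfa_fruvpd_read() for the VPD EEPROM (CT2 ASIC with Chinook/Chinook2 cards) and bfa_tfru_read() for the temperature FRU; both queue a mailbox request and complete asynchronously through bfa_fru_intr(). Wrapper names are illustrative.

static void drv_fru_done(void *cbarg, bfa_status_t status)
{
	pr_info("fru transfer done, status %d\n", status);
}

static bfa_status_t drv_read_fru_vpd(struct bfa_fru_s *fru, void *buf, u32 len)
{
	return bfa_fruvpd_read(fru, buf, len, 0, drv_fru_done, NULL);
}

static bfa_status_t drv_read_tfru(struct bfa_fru_s *fru, void *buf, u32 len)
{
	return bfa_tfru_read(fru, buf, len, 0, drv_fru_done, NULL);
}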
6548 * @param[in] fruarg - fru structure
6549 * @param[in] msg - message structure
6558 bfa_trc(fru, msg->mh.msg_id); in bfa_fru_intr()
6560 if (!fru->op_busy) { in bfa_fru_intr()
6568 switch (msg->mh.msg_id) { in bfa_fru_intr()
6571 status = be32_to_cpu(rsp->status); in bfa_fru_intr()
6574 if (status != BFA_STATUS_OK || fru->residue == 0) { in bfa_fru_intr()
6575 fru->status = status; in bfa_fru_intr()
6576 fru->op_busy = 0; in bfa_fru_intr()
6577 if (fru->cbfn) in bfa_fru_intr()
6578 fru->cbfn(fru->cbarg, fru->status); in bfa_fru_intr()
6580 bfa_trc(fru, fru->offset); in bfa_fru_intr()
6581 if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP) in bfa_fru_intr()
6591 status = be32_to_cpu(rsp->status); in bfa_fru_intr()
6595 fru->status = status; in bfa_fru_intr()
6596 fru->op_busy = 0; in bfa_fru_intr()
6597 if (fru->cbfn) in bfa_fru_intr()
6598 fru->cbfn(fru->cbarg, fru->status); in bfa_fru_intr()
6600 u32 len = be32_to_cpu(rsp->length); in bfa_fru_intr()
6602 bfa_trc(fru, fru->offset); in bfa_fru_intr()
6605 memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len); in bfa_fru_intr()
6606 fru->residue -= len; in bfa_fru_intr()
6607 fru->offset += len; in bfa_fru_intr()
6609 if (fru->residue == 0) { in bfa_fru_intr()
6610 fru->status = status; in bfa_fru_intr()
6611 fru->op_busy = 0; in bfa_fru_intr()
6612 if (fru->cbfn) in bfa_fru_intr()
6613 fru->cbfn(fru->cbarg, fru->status); in bfa_fru_intr()
6615 if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP) in bfa_fru_intr()
6651 BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
6652 BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
6653 BFA_FLASH_BAD = -3, /*!< flash bad */
6654 BFA_FLASH_BUSY = -4, /*!< flash busy */
6655 BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
6656 BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
6657 BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
6658 BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
6659 BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
6773 * @param[in] pci_bar - pci bar address
6774 * @param[in] dev_status - device status
6812 * @param[in] pci_bar - pci bar address
6857 * @param[in] pci_bar - pci bar address
6858 * @param[in] offset - flash address offset
6859 * @param[in] len - read data length
6860 * @param[in] buf - read data buffer
6887 * check if write-in-progress bit is cleared in bfa_flash_read_start()
6903 * @param[in] pci_bar - pci bar address
6920 * @param[in] pci_bar - pci bar address
6921 * @param[in] len - read data length
6922 * @param[in] buf - read data buffer
6946 * @param[in] pci_bar - pci bar address
6947 * @param[in] offset - flash partition address offset
6948 * @param[in] buf - read data buffer
6949 * @param[in] len - read data length
6974 if (--n <= 0) in bfa_flash_sem_get()
7005 l = (n + 1) * fifo_sz - s; in bfa_flash_raw_read()
7018 if (--n <= 0) { in bfa_flash_raw_read()
7026 residue -= l; in bfa_flash_raw_read()
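A hedged sketch of the raw, firmware-independent flash read entry point documented at 6946 above; it reads through the PCI BAR flash FIFO, so it works even when the IOC is not operational. Only bfa_flash_raw_read() and its parameter list come from the comments above; the wrapper and the assumption that it returns a bfa_status_t are illustrative.

static bfa_status_t drv_raw_read(void __iomem *pci_bar, u32 part_off,
				 char *buf, u32 len)
{
	/* loops over the flash FIFO internally, decreasing 'residue'
	 * until all 'len' bytes starting at 'part_off' are read */
	return bfa_flash_raw_read(pci_bar, part_off, buf, len);
}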