1f844a0eaSJeff Kirsher /* 2f844a0eaSJeff Kirsher * Linux network driver for Brocade Converged Network Adapter. 3f844a0eaSJeff Kirsher * 4f844a0eaSJeff Kirsher * This program is free software; you can redistribute it and/or modify it 5f844a0eaSJeff Kirsher * under the terms of the GNU General Public License (GPL) Version 2 as 6f844a0eaSJeff Kirsher * published by the Free Software Foundation 7f844a0eaSJeff Kirsher * 8f844a0eaSJeff Kirsher * This program is distributed in the hope that it will be useful, but 9f844a0eaSJeff Kirsher * WITHOUT ANY WARRANTY; without even the implied warranty of 10f844a0eaSJeff Kirsher * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11f844a0eaSJeff Kirsher * General Public License for more details. 12f844a0eaSJeff Kirsher */ 13f844a0eaSJeff Kirsher /* 14f844a0eaSJeff Kirsher * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 15f844a0eaSJeff Kirsher * All rights reserved 16f844a0eaSJeff Kirsher * www.brocade.com 17f844a0eaSJeff Kirsher */ 18f844a0eaSJeff Kirsher 19f844a0eaSJeff Kirsher #include "bfa_ioc.h" 20f844a0eaSJeff Kirsher #include "cna.h" 21f844a0eaSJeff Kirsher #include "bfi.h" 22f844a0eaSJeff Kirsher #include "bfi_reg.h" 23f844a0eaSJeff Kirsher #include "bfa_defs.h" 24f844a0eaSJeff Kirsher 25f844a0eaSJeff Kirsher #define bfa_ioc_ct_sync_pos(__ioc) \ 26f844a0eaSJeff Kirsher ((u32) (1 << bfa_ioc_pcifn(__ioc))) 27f844a0eaSJeff Kirsher #define BFA_IOC_SYNC_REQD_SH 16 28f844a0eaSJeff Kirsher #define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) 29f844a0eaSJeff Kirsher #define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) 30f844a0eaSJeff Kirsher #define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) 31f844a0eaSJeff Kirsher #define bfa_ioc_ct_sync_reqd_pos(__ioc) \ 32f844a0eaSJeff Kirsher (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) 33f844a0eaSJeff Kirsher 34f844a0eaSJeff Kirsher /* 35f844a0eaSJeff Kirsher * forward declarations 36f844a0eaSJeff Kirsher */ 
37f844a0eaSJeff Kirsher static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc); 38f844a0eaSJeff Kirsher static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc); 39f844a0eaSJeff Kirsher static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); 40be3a84d1SRasesh Mody static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc); 41f844a0eaSJeff Kirsher static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); 42be3a84d1SRasesh Mody static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc); 43f844a0eaSJeff Kirsher static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); 44f844a0eaSJeff Kirsher static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); 45f844a0eaSJeff Kirsher static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); 46f844a0eaSJeff Kirsher static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc); 47f844a0eaSJeff Kirsher static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); 48f844a0eaSJeff Kirsher static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); 49f844a0eaSJeff Kirsher static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); 50f844a0eaSJeff Kirsher static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); 51078086f3SRasesh Mody static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, 52078086f3SRasesh Mody enum bfi_asic_mode asic_mode); 53be3a84d1SRasesh Mody static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, 54be3a84d1SRasesh Mody enum bfi_asic_mode asic_mode); 55be3a84d1SRasesh Mody static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc); 56f844a0eaSJeff Kirsher 57d91d25d5Sstephen hemminger static const struct bfa_ioc_hwif nw_hwif_ct = { 58d91d25d5Sstephen hemminger .ioc_pll_init = bfa_ioc_ct_pll_init, 59d91d25d5Sstephen hemminger .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, 60d91d25d5Sstephen hemminger .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, 61d91d25d5Sstephen hemminger .ioc_reg_init = bfa_ioc_ct_reg_init, 62d91d25d5Sstephen hemminger .ioc_map_port = bfa_ioc_ct_map_port, 63d91d25d5Sstephen hemminger 
.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set, 64d91d25d5Sstephen hemminger .ioc_notify_fail = bfa_ioc_ct_notify_fail, 65d91d25d5Sstephen hemminger .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, 66d91d25d5Sstephen hemminger .ioc_sync_start = bfa_ioc_ct_sync_start, 67d91d25d5Sstephen hemminger .ioc_sync_join = bfa_ioc_ct_sync_join, 68d91d25d5Sstephen hemminger .ioc_sync_leave = bfa_ioc_ct_sync_leave, 69d91d25d5Sstephen hemminger .ioc_sync_ack = bfa_ioc_ct_sync_ack, 70d91d25d5Sstephen hemminger .ioc_sync_complete = bfa_ioc_ct_sync_complete, 71d91d25d5Sstephen hemminger }; 72f844a0eaSJeff Kirsher 73be3a84d1SRasesh Mody static const struct bfa_ioc_hwif nw_hwif_ct2 = { 74be3a84d1SRasesh Mody .ioc_pll_init = bfa_ioc_ct2_pll_init, 75be3a84d1SRasesh Mody .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, 76be3a84d1SRasesh Mody .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, 77be3a84d1SRasesh Mody .ioc_reg_init = bfa_ioc_ct2_reg_init, 78be3a84d1SRasesh Mody .ioc_map_port = bfa_ioc_ct2_map_port, 79be3a84d1SRasesh Mody .ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat, 80be3a84d1SRasesh Mody .ioc_isr_mode_set = NULL, 81be3a84d1SRasesh Mody .ioc_notify_fail = bfa_ioc_ct_notify_fail, 82be3a84d1SRasesh Mody .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, 83be3a84d1SRasesh Mody .ioc_sync_start = bfa_ioc_ct_sync_start, 84be3a84d1SRasesh Mody .ioc_sync_join = bfa_ioc_ct_sync_join, 85be3a84d1SRasesh Mody .ioc_sync_leave = bfa_ioc_ct_sync_leave, 86be3a84d1SRasesh Mody .ioc_sync_ack = bfa_ioc_ct_sync_ack, 87be3a84d1SRasesh Mody .ioc_sync_complete = bfa_ioc_ct_sync_complete, 88be3a84d1SRasesh Mody }; 89be3a84d1SRasesh Mody 90f844a0eaSJeff Kirsher /** 91f844a0eaSJeff Kirsher * Called from bfa_ioc_attach() to map asic specific calls. 
92f844a0eaSJeff Kirsher */ 93f844a0eaSJeff Kirsher void 94f844a0eaSJeff Kirsher bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) 95f844a0eaSJeff Kirsher { 96f844a0eaSJeff Kirsher ioc->ioc_hwif = &nw_hwif_ct; 97f844a0eaSJeff Kirsher } 98f844a0eaSJeff Kirsher 99be3a84d1SRasesh Mody void 100be3a84d1SRasesh Mody bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc) 101be3a84d1SRasesh Mody { 102be3a84d1SRasesh Mody ioc->ioc_hwif = &nw_hwif_ct2; 103be3a84d1SRasesh Mody } 104be3a84d1SRasesh Mody 105f844a0eaSJeff Kirsher /** 106f844a0eaSJeff Kirsher * Return true if firmware of current driver matches the running firmware. 107f844a0eaSJeff Kirsher */ 108f844a0eaSJeff Kirsher static bool 109f844a0eaSJeff Kirsher bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) 110f844a0eaSJeff Kirsher { 111f844a0eaSJeff Kirsher enum bfi_ioc_state ioc_fwstate; 112f844a0eaSJeff Kirsher u32 usecnt; 113f844a0eaSJeff Kirsher struct bfi_ioc_image_hdr fwhdr; 114f844a0eaSJeff Kirsher 115f844a0eaSJeff Kirsher /** 116f844a0eaSJeff Kirsher * If bios boot (flash based) -- do not increment usage count 117f844a0eaSJeff Kirsher */ 118078086f3SRasesh Mody if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < 119f844a0eaSJeff Kirsher BFA_IOC_FWIMG_MINSZ) 120f844a0eaSJeff Kirsher return true; 121f844a0eaSJeff Kirsher 122f844a0eaSJeff Kirsher bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 123f844a0eaSJeff Kirsher usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 124f844a0eaSJeff Kirsher 125f844a0eaSJeff Kirsher /** 126f844a0eaSJeff Kirsher * If usage count is 0, always return TRUE. 
127f844a0eaSJeff Kirsher */ 128f844a0eaSJeff Kirsher if (usecnt == 0) { 129f844a0eaSJeff Kirsher writel(1, ioc->ioc_regs.ioc_usage_reg); 130f844a0eaSJeff Kirsher bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 131f844a0eaSJeff Kirsher writel(0, ioc->ioc_regs.ioc_fail_sync); 132f844a0eaSJeff Kirsher return true; 133f844a0eaSJeff Kirsher } 134f844a0eaSJeff Kirsher 135f844a0eaSJeff Kirsher ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 136f844a0eaSJeff Kirsher 137f844a0eaSJeff Kirsher /** 138f844a0eaSJeff Kirsher * Use count cannot be non-zero and chip in uninitialized state. 139f844a0eaSJeff Kirsher */ 140f844a0eaSJeff Kirsher BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT)); 141f844a0eaSJeff Kirsher 142f844a0eaSJeff Kirsher /** 143f844a0eaSJeff Kirsher * Check if another driver with a different firmware is active 144f844a0eaSJeff Kirsher */ 145f844a0eaSJeff Kirsher bfa_nw_ioc_fwver_get(ioc, &fwhdr); 146f844a0eaSJeff Kirsher if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) { 147f844a0eaSJeff Kirsher bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 148f844a0eaSJeff Kirsher return false; 149f844a0eaSJeff Kirsher } 150f844a0eaSJeff Kirsher 151f844a0eaSJeff Kirsher /** 152f844a0eaSJeff Kirsher * Same firmware version. Increment the reference count. 
153f844a0eaSJeff Kirsher */ 154f844a0eaSJeff Kirsher usecnt++; 155f844a0eaSJeff Kirsher writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 156f844a0eaSJeff Kirsher bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 157f844a0eaSJeff Kirsher return true; 158f844a0eaSJeff Kirsher } 159f844a0eaSJeff Kirsher 160f844a0eaSJeff Kirsher static void 161f844a0eaSJeff Kirsher bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) 162f844a0eaSJeff Kirsher { 163f844a0eaSJeff Kirsher u32 usecnt; 164f844a0eaSJeff Kirsher 165f844a0eaSJeff Kirsher /** 166f844a0eaSJeff Kirsher * If bios boot (flash based) -- do not decrement usage count 167f844a0eaSJeff Kirsher */ 168078086f3SRasesh Mody if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < 169f844a0eaSJeff Kirsher BFA_IOC_FWIMG_MINSZ) 170f844a0eaSJeff Kirsher return; 171f844a0eaSJeff Kirsher 172f844a0eaSJeff Kirsher /** 173f844a0eaSJeff Kirsher * decrement usage count 174f844a0eaSJeff Kirsher */ 175f844a0eaSJeff Kirsher bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 176f844a0eaSJeff Kirsher usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 177f844a0eaSJeff Kirsher BUG_ON(!(usecnt > 0)); 178f844a0eaSJeff Kirsher 179f844a0eaSJeff Kirsher usecnt--; 180f844a0eaSJeff Kirsher writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 181f844a0eaSJeff Kirsher 182f844a0eaSJeff Kirsher bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 183f844a0eaSJeff Kirsher } 184f844a0eaSJeff Kirsher 185f844a0eaSJeff Kirsher /** 186f844a0eaSJeff Kirsher * Notify other functions on HB failure. 
187f844a0eaSJeff Kirsher */ 188f844a0eaSJeff Kirsher static void 189f844a0eaSJeff Kirsher bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) 190f844a0eaSJeff Kirsher { 191f844a0eaSJeff Kirsher writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); 192f844a0eaSJeff Kirsher writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); 193f844a0eaSJeff Kirsher /* Wait for halt to take effect */ 194f844a0eaSJeff Kirsher readl(ioc->ioc_regs.ll_halt); 195f844a0eaSJeff Kirsher readl(ioc->ioc_regs.alt_ll_halt); 196f844a0eaSJeff Kirsher } 197f844a0eaSJeff Kirsher 198f844a0eaSJeff Kirsher /** 199f844a0eaSJeff Kirsher * Host to LPU mailbox message addresses 200f844a0eaSJeff Kirsher */ 201be3a84d1SRasesh Mody static const struct { 202be3a84d1SRasesh Mody u32 hfn_mbox; 203be3a84d1SRasesh Mody u32 lpu_mbox; 204be3a84d1SRasesh Mody u32 hfn_pgn; 205be3a84d1SRasesh Mody } ct_fnreg[] = { 206f844a0eaSJeff Kirsher { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, 207f844a0eaSJeff Kirsher { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, 208f844a0eaSJeff Kirsher { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, 209f844a0eaSJeff Kirsher { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } 210f844a0eaSJeff Kirsher }; 211f844a0eaSJeff Kirsher 212f844a0eaSJeff Kirsher /** 213f844a0eaSJeff Kirsher * Host <-> LPU mailbox command/status registers - port 0 214f844a0eaSJeff Kirsher */ 215be3a84d1SRasesh Mody static const struct { 216be3a84d1SRasesh Mody u32 hfn; 217be3a84d1SRasesh Mody u32 lpu; 218be3a84d1SRasesh Mody } ct_p0reg[] = { 219f844a0eaSJeff Kirsher { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, 220f844a0eaSJeff Kirsher { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, 221f844a0eaSJeff Kirsher { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, 222f844a0eaSJeff Kirsher { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } 223f844a0eaSJeff Kirsher }; 224f844a0eaSJeff Kirsher 225f844a0eaSJeff Kirsher /** 226f844a0eaSJeff Kirsher * Host <-> LPU mailbox 
command/status registers - port 1 227f844a0eaSJeff Kirsher */ 228be3a84d1SRasesh Mody static const struct { 229be3a84d1SRasesh Mody u32 hfn; 230be3a84d1SRasesh Mody u32 lpu; 231be3a84d1SRasesh Mody } ct_p1reg[] = { 232f844a0eaSJeff Kirsher { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, 233f844a0eaSJeff Kirsher { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, 234f844a0eaSJeff Kirsher { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, 235f844a0eaSJeff Kirsher { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } 236f844a0eaSJeff Kirsher }; 237f844a0eaSJeff Kirsher 238be3a84d1SRasesh Mody static const struct { 239be3a84d1SRasesh Mody u32 hfn_mbox; 240be3a84d1SRasesh Mody u32 lpu_mbox; 241be3a84d1SRasesh Mody u32 hfn_pgn; 242be3a84d1SRasesh Mody u32 hfn; 243be3a84d1SRasesh Mody u32 lpu; 244be3a84d1SRasesh Mody u32 lpu_read; 245be3a84d1SRasesh Mody } ct2_reg[] = { 246be3a84d1SRasesh Mody { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, 247be3a84d1SRasesh Mody CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT, 248be3a84d1SRasesh Mody CT2_HOSTFN_LPU0_READ_STAT}, 249be3a84d1SRasesh Mody { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, 250be3a84d1SRasesh Mody CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT, 251be3a84d1SRasesh Mody CT2_HOSTFN_LPU1_READ_STAT}, 252be3a84d1SRasesh Mody }; 253be3a84d1SRasesh Mody 254f844a0eaSJeff Kirsher static void 255f844a0eaSJeff Kirsher bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) 256f844a0eaSJeff Kirsher { 257f844a0eaSJeff Kirsher void __iomem *rb; 258f844a0eaSJeff Kirsher int pcifn = bfa_ioc_pcifn(ioc); 259f844a0eaSJeff Kirsher 260f844a0eaSJeff Kirsher rb = bfa_ioc_bar0(ioc); 261f844a0eaSJeff Kirsher 262078086f3SRasesh Mody ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; 263078086f3SRasesh Mody ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; 264078086f3SRasesh Mody ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; 265f844a0eaSJeff Kirsher 266f844a0eaSJeff Kirsher if 
(ioc->port_id == 0) { 267f844a0eaSJeff Kirsher ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; 268f844a0eaSJeff Kirsher ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; 269f844a0eaSJeff Kirsher ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; 270f844a0eaSJeff Kirsher ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; 271f844a0eaSJeff Kirsher ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; 272f844a0eaSJeff Kirsher ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; 273f844a0eaSJeff Kirsher ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; 274f844a0eaSJeff Kirsher } else { 275be3a84d1SRasesh Mody ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG; 276be3a84d1SRasesh Mody ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG; 277f844a0eaSJeff Kirsher ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; 278f844a0eaSJeff Kirsher ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; 279f844a0eaSJeff Kirsher ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; 280f844a0eaSJeff Kirsher ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; 281f844a0eaSJeff Kirsher ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; 282f844a0eaSJeff Kirsher } 283f844a0eaSJeff Kirsher 284f844a0eaSJeff Kirsher /* 285f844a0eaSJeff Kirsher * PSS control registers 286f844a0eaSJeff Kirsher */ 287be3a84d1SRasesh Mody ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; 288be3a84d1SRasesh Mody ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; 289be3a84d1SRasesh Mody ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG; 290be3a84d1SRasesh Mody ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG; 291f844a0eaSJeff Kirsher 292f844a0eaSJeff Kirsher /* 293f844a0eaSJeff Kirsher * IOC semaphore registers and serialization 294f844a0eaSJeff Kirsher */ 295be3a84d1SRasesh Mody ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG; 296be3a84d1SRasesh Mody ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG; 297be3a84d1SRasesh Mody ioc->ioc_regs.ioc_init_sem_reg = rb + 
HOST_SEM2_REG; 298be3a84d1SRasesh Mody ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT; 299be3a84d1SRasesh Mody ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC; 300f844a0eaSJeff Kirsher 301f844a0eaSJeff Kirsher /** 302f844a0eaSJeff Kirsher * sram memory access 303f844a0eaSJeff Kirsher */ 304be3a84d1SRasesh Mody ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; 305f844a0eaSJeff Kirsher ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; 306f844a0eaSJeff Kirsher 307f844a0eaSJeff Kirsher /* 308f844a0eaSJeff Kirsher * err set reg : for notification of hb failure in fcmode 309f844a0eaSJeff Kirsher */ 310f844a0eaSJeff Kirsher ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 311f844a0eaSJeff Kirsher } 312f844a0eaSJeff Kirsher 313be3a84d1SRasesh Mody static void 314be3a84d1SRasesh Mody bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc) 315be3a84d1SRasesh Mody { 316be3a84d1SRasesh Mody void __iomem *rb; 317be3a84d1SRasesh Mody int port = bfa_ioc_portid(ioc); 318be3a84d1SRasesh Mody 319be3a84d1SRasesh Mody rb = bfa_ioc_bar0(ioc); 320be3a84d1SRasesh Mody 321be3a84d1SRasesh Mody ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; 322be3a84d1SRasesh Mody ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; 323be3a84d1SRasesh Mody ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; 324be3a84d1SRasesh Mody ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; 325be3a84d1SRasesh Mody ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; 326be3a84d1SRasesh Mody ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; 327be3a84d1SRasesh Mody 328be3a84d1SRasesh Mody if (port == 0) { 329be3a84d1SRasesh Mody ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; 330be3a84d1SRasesh Mody ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; 331be3a84d1SRasesh Mody ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; 332be3a84d1SRasesh Mody ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; 333be3a84d1SRasesh Mody ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; 
334be3a84d1SRasesh Mody } else { 335be3a84d1SRasesh Mody ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG; 336be3a84d1SRasesh Mody ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; 337be3a84d1SRasesh Mody ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; 338be3a84d1SRasesh Mody ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; 339be3a84d1SRasesh Mody ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; 340be3a84d1SRasesh Mody } 341be3a84d1SRasesh Mody 342be3a84d1SRasesh Mody /* 343be3a84d1SRasesh Mody * PSS control registers 344be3a84d1SRasesh Mody */ 345be3a84d1SRasesh Mody ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; 346be3a84d1SRasesh Mody ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; 347be3a84d1SRasesh Mody ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG; 348be3a84d1SRasesh Mody ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG; 349be3a84d1SRasesh Mody 350be3a84d1SRasesh Mody /* 351be3a84d1SRasesh Mody * IOC semaphore registers and serialization 352be3a84d1SRasesh Mody */ 353be3a84d1SRasesh Mody ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG; 354be3a84d1SRasesh Mody ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG; 355be3a84d1SRasesh Mody ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG; 356be3a84d1SRasesh Mody ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT; 357be3a84d1SRasesh Mody ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC; 358be3a84d1SRasesh Mody 359be3a84d1SRasesh Mody /** 360be3a84d1SRasesh Mody * sram memory access 361be3a84d1SRasesh Mody */ 362be3a84d1SRasesh Mody ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; 363be3a84d1SRasesh Mody ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; 364be3a84d1SRasesh Mody 365be3a84d1SRasesh Mody /* 366be3a84d1SRasesh Mody * err set reg : for notification of hb failure in fcmode 367be3a84d1SRasesh Mody */ 368be3a84d1SRasesh Mody ioc->ioc_regs.err_set = rb + ERR_SET_REG; 369be3a84d1SRasesh Mody } 
370be3a84d1SRasesh Mody 371f844a0eaSJeff Kirsher /** 372f844a0eaSJeff Kirsher * Initialize IOC to port mapping. 373f844a0eaSJeff Kirsher */ 374f844a0eaSJeff Kirsher 375f844a0eaSJeff Kirsher #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) 376f844a0eaSJeff Kirsher static void 377f844a0eaSJeff Kirsher bfa_ioc_ct_map_port(struct bfa_ioc *ioc) 378f844a0eaSJeff Kirsher { 379f844a0eaSJeff Kirsher void __iomem *rb = ioc->pcidev.pci_bar_kva; 380f844a0eaSJeff Kirsher u32 r32; 381f844a0eaSJeff Kirsher 382f844a0eaSJeff Kirsher /** 383f844a0eaSJeff Kirsher * For catapult, base port id on personality register and IOC type 384f844a0eaSJeff Kirsher */ 385f844a0eaSJeff Kirsher r32 = readl(rb + FNC_PERS_REG); 386f844a0eaSJeff Kirsher r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); 387f844a0eaSJeff Kirsher ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; 388f844a0eaSJeff Kirsher 389f844a0eaSJeff Kirsher } 390f844a0eaSJeff Kirsher 391be3a84d1SRasesh Mody static void 392be3a84d1SRasesh Mody bfa_ioc_ct2_map_port(struct bfa_ioc *ioc) 393be3a84d1SRasesh Mody { 394be3a84d1SRasesh Mody void __iomem *rb = ioc->pcidev.pci_bar_kva; 395be3a84d1SRasesh Mody u32 r32; 396be3a84d1SRasesh Mody 397be3a84d1SRasesh Mody r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); 398be3a84d1SRasesh Mody ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); 399be3a84d1SRasesh Mody } 400be3a84d1SRasesh Mody 401f844a0eaSJeff Kirsher /** 402f844a0eaSJeff Kirsher * Set interrupt mode for a function: INTX or MSIX 403f844a0eaSJeff Kirsher */ 404f844a0eaSJeff Kirsher static void 405f844a0eaSJeff Kirsher bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) 406f844a0eaSJeff Kirsher { 407f844a0eaSJeff Kirsher void __iomem *rb = ioc->pcidev.pci_bar_kva; 408f844a0eaSJeff Kirsher u32 r32, mode; 409f844a0eaSJeff Kirsher 410f844a0eaSJeff Kirsher r32 = readl(rb + FNC_PERS_REG); 411f844a0eaSJeff Kirsher 412f844a0eaSJeff Kirsher mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & 413f844a0eaSJeff Kirsher 
__F0_INTX_STATUS; 414f844a0eaSJeff Kirsher 415f844a0eaSJeff Kirsher /** 416f844a0eaSJeff Kirsher * If already in desired mode, do not change anything 417f844a0eaSJeff Kirsher */ 418f844a0eaSJeff Kirsher if ((!msix && mode) || (msix && !mode)) 419f844a0eaSJeff Kirsher return; 420f844a0eaSJeff Kirsher 421f844a0eaSJeff Kirsher if (msix) 422f844a0eaSJeff Kirsher mode = __F0_INTX_STATUS_MSIX; 423f844a0eaSJeff Kirsher else 424f844a0eaSJeff Kirsher mode = __F0_INTX_STATUS_INTA; 425f844a0eaSJeff Kirsher 426f844a0eaSJeff Kirsher r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); 427f844a0eaSJeff Kirsher r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); 428f844a0eaSJeff Kirsher 429f844a0eaSJeff Kirsher writel(r32, rb + FNC_PERS_REG); 430f844a0eaSJeff Kirsher } 431f844a0eaSJeff Kirsher 432be3a84d1SRasesh Mody static bool 433be3a84d1SRasesh Mody bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc) 434be3a84d1SRasesh Mody { 435be3a84d1SRasesh Mody u32 r32; 436be3a84d1SRasesh Mody 437be3a84d1SRasesh Mody r32 = readl(ioc->ioc_regs.lpu_read_stat); 438be3a84d1SRasesh Mody if (r32) { 439be3a84d1SRasesh Mody writel(1, ioc->ioc_regs.lpu_read_stat); 440be3a84d1SRasesh Mody return true; 441be3a84d1SRasesh Mody } 442be3a84d1SRasesh Mody 443be3a84d1SRasesh Mody return false; 444be3a84d1SRasesh Mody } 445be3a84d1SRasesh Mody 446be3a84d1SRasesh Mody /** 447be3a84d1SRasesh Mody * MSI-X resource allocation for 1860 with no asic block 448be3a84d1SRasesh Mody */ 449be3a84d1SRasesh Mody #define HOSTFN_MSIX_DEFAULT 64 450be3a84d1SRasesh Mody #define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 451be3a84d1SRasesh Mody #define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c 452be3a84d1SRasesh Mody #define __MSIX_VT_NUMVT__MK 0x003ff800 453be3a84d1SRasesh Mody #define __MSIX_VT_NUMVT__SH 11 454be3a84d1SRasesh Mody #define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH) 455be3a84d1SRasesh Mody #define __MSIX_VT_OFST_ 0x000007ff 456be3a84d1SRasesh Mody void 457be3a84d1SRasesh Mody 
bfa_ioc_ct2_poweron(struct bfa_ioc *ioc) 458be3a84d1SRasesh Mody { 459be3a84d1SRasesh Mody void __iomem *rb = ioc->pcidev.pci_bar_kva; 460be3a84d1SRasesh Mody u32 r32; 461be3a84d1SRasesh Mody 462be3a84d1SRasesh Mody r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); 463be3a84d1SRasesh Mody if (r32 & __MSIX_VT_NUMVT__MK) { 464be3a84d1SRasesh Mody writel(r32 & __MSIX_VT_OFST_, 465be3a84d1SRasesh Mody rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); 466be3a84d1SRasesh Mody return; 467be3a84d1SRasesh Mody } 468be3a84d1SRasesh Mody 469be3a84d1SRasesh Mody writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) | 470be3a84d1SRasesh Mody HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), 471be3a84d1SRasesh Mody rb + HOSTFN_MSIX_VT_OFST_NUMVT); 472be3a84d1SRasesh Mody writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), 473be3a84d1SRasesh Mody rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); 474be3a84d1SRasesh Mody } 475be3a84d1SRasesh Mody 476f844a0eaSJeff Kirsher /** 477f844a0eaSJeff Kirsher * Cleanup hw semaphore and usecnt registers 478f844a0eaSJeff Kirsher */ 479f844a0eaSJeff Kirsher static void 480f844a0eaSJeff Kirsher bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) 481f844a0eaSJeff Kirsher { 482f844a0eaSJeff Kirsher bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 483f844a0eaSJeff Kirsher writel(0, ioc->ioc_regs.ioc_usage_reg); 484f844a0eaSJeff Kirsher bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 485f844a0eaSJeff Kirsher 486f844a0eaSJeff Kirsher /* 487f844a0eaSJeff Kirsher * Read the hw sem reg to make sure that it is locked 488f844a0eaSJeff Kirsher * before we clear it. If it is not locked, writing 1 489f844a0eaSJeff Kirsher * will lock it instead of clearing it. 
490f844a0eaSJeff Kirsher */ 491f844a0eaSJeff Kirsher readl(ioc->ioc_regs.ioc_sem_reg); 492f844a0eaSJeff Kirsher bfa_nw_ioc_hw_sem_release(ioc); 493f844a0eaSJeff Kirsher } 494f844a0eaSJeff Kirsher 495f844a0eaSJeff Kirsher /** 496f844a0eaSJeff Kirsher * Synchronized IOC failure processing routines 497f844a0eaSJeff Kirsher */ 498f844a0eaSJeff Kirsher static bool 499f844a0eaSJeff Kirsher bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) 500f844a0eaSJeff Kirsher { 501f844a0eaSJeff Kirsher u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); 502f844a0eaSJeff Kirsher u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); 503f844a0eaSJeff Kirsher 504f844a0eaSJeff Kirsher /* 505f844a0eaSJeff Kirsher * Driver load time. If the sync required bit for this PCI fn 506f844a0eaSJeff Kirsher * is set, it is due to an unclean exit by the driver for this 507f844a0eaSJeff Kirsher * PCI fn in the previous incarnation. Whoever comes here first 508f844a0eaSJeff Kirsher * should clean it up, no matter which PCI fn. 509f844a0eaSJeff Kirsher */ 510f844a0eaSJeff Kirsher 511f844a0eaSJeff Kirsher if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { 512f844a0eaSJeff Kirsher writel(0, ioc->ioc_regs.ioc_fail_sync); 513f844a0eaSJeff Kirsher writel(1, ioc->ioc_regs.ioc_usage_reg); 514f844a0eaSJeff Kirsher writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); 515f844a0eaSJeff Kirsher writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); 516f844a0eaSJeff Kirsher return true; 517f844a0eaSJeff Kirsher } 518f844a0eaSJeff Kirsher 519f844a0eaSJeff Kirsher return bfa_ioc_ct_sync_complete(ioc); 520f844a0eaSJeff Kirsher } 521f844a0eaSJeff Kirsher /** 522f844a0eaSJeff Kirsher * Synchronized IOC failure processing routines 523f844a0eaSJeff Kirsher */ 524f844a0eaSJeff Kirsher static void 525f844a0eaSJeff Kirsher bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) 526f844a0eaSJeff Kirsher { 527f844a0eaSJeff Kirsher u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); 528f844a0eaSJeff Kirsher u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); 529f844a0eaSJeff Kirsher 
530f844a0eaSJeff Kirsher writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); 531f844a0eaSJeff Kirsher } 532f844a0eaSJeff Kirsher 533f844a0eaSJeff Kirsher static void 534f844a0eaSJeff Kirsher bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc) 535f844a0eaSJeff Kirsher { 536f844a0eaSJeff Kirsher u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); 537f844a0eaSJeff Kirsher u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | 538f844a0eaSJeff Kirsher bfa_ioc_ct_sync_pos(ioc); 539f844a0eaSJeff Kirsher 540f844a0eaSJeff Kirsher writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); 541f844a0eaSJeff Kirsher } 542f844a0eaSJeff Kirsher 543f844a0eaSJeff Kirsher static void 544f844a0eaSJeff Kirsher bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc) 545f844a0eaSJeff Kirsher { 546f844a0eaSJeff Kirsher u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); 547f844a0eaSJeff Kirsher 548f844a0eaSJeff Kirsher writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync); 549f844a0eaSJeff Kirsher } 550f844a0eaSJeff Kirsher 551f844a0eaSJeff Kirsher static bool 552f844a0eaSJeff Kirsher bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) 553f844a0eaSJeff Kirsher { 554f844a0eaSJeff Kirsher u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); 555f844a0eaSJeff Kirsher u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); 556f844a0eaSJeff Kirsher u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); 557f844a0eaSJeff Kirsher u32 tmp_ackd; 558f844a0eaSJeff Kirsher 559f844a0eaSJeff Kirsher if (sync_ackd == 0) 560f844a0eaSJeff Kirsher return true; 561f844a0eaSJeff Kirsher 562f844a0eaSJeff Kirsher /** 563f844a0eaSJeff Kirsher * The check below is to see whether any other PCI fn 564f844a0eaSJeff Kirsher * has reinitialized the ASIC (reset sync_ackd bits) 565f844a0eaSJeff Kirsher * and failed again while this IOC was waiting for hw 566f844a0eaSJeff Kirsher * semaphore (in bfa_iocpf_sm_semwait()). 
	 */
	/* Remember the ack mask as read from hardware so we can detect a
	 * concurrent change below (tail of bfa_ioc_ct_sync_complete; the
	 * function head and the reads of sync_reqd/sync_ackd/r32 are above
	 * this excerpt).
	 */
	tmp_ackd = sync_ackd;
	/* If this PCI function requested a sync but has not yet acked it,
	 * ack on its behalf now.
	 */
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	/* All requesting functions have acked: clear the ack bits and mark
	 * both IOC firmware states failed; sync is complete.
	 */
	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
		ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/**
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}

/**
 * bfa_ioc_ct_pll_init - bring up the CT (Catapult) ASIC PLLs.
 * @rb:		ioremapped register base of the adapter
 * @asic_mode:	BFI_ASIC_MODE_FC selects FC mode, anything else FCoE/Eth
 *
 * Programs the s_clk and l_clk PLLs, resets both IOC state registers to
 * BFI_IOC_UNINIT, releases local memory from reset and runs the eDRAM
 * memory BIST.  The register write ordering and the udelay()s are part of
 * the hardware bring-up sequence — do not reorder.
 *
 * Always returns BFA_STATUS_OK.
 */
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32 pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	/* PLL configuration words: reset-released, auto-restart, 200-500MHz
	 * range select, divider/jitter/count limits.
	 */
	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	/* Select operating mode and Ethernet MAC serdes reference clock. */
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
		       __APP_EMS_REFCKBUFEN2 |
		       __APP_EMS_CHANNEL_SEL,
		       (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
		       (rb + ETH_MAC_SER_REG));
	}

	/* Both IOC firmware states start out uninitialized. */
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));

	/* Mask and clear all host-function interrupts before touching the
	 * PLLs.  The MSK registers are written again after clearing STATUS;
	 * presumably required by the hardware sequence — kept as-is.
	 */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	/* Apply PLL config with logic soft reset asserted, then enable the
	 * PLLs while still in soft reset.
	 */
	writel(pll_sclk |
	       __APP_PLL_SCLK_LOGIC_SOFT_RESET,
	       rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
	       __APP_PLL_LCLK_LOGIC_SOFT_RESET,
	       rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk |
	       __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
	       rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
	       __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
	       rb + APP_PLL_LCLK_CTL_REG);

	/* Read back to flush posted writes, then wait for the PLLs to lock. */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));

	/* Release logic soft reset, leaving the PLLs enabled. */
	writel(pll_sclk |
	       __APP_PLL_SCLK_ENABLE,
	       rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
	       __APP_PLL_LCLK_ENABLE,
	       rb + APP_PLL_LCLK_CTL_REG);

	/* In FCoE/Eth mode, pulse the per-port 1T memory reset around the
	 * LMEM reset release below.
	 */
	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	/* Take local memory out of reset. */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* Kick the eDRAM built-in self test, give it 1ms, then stop it.
	 * NOTE(review): the MBIST_STAT_REG result is read but never checked;
	 * presumably the read is only needed to latch/clear status — confirm.
	 */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}

/**
 * bfa_ioc_ct2_sclk_init - initialize the CT2 ASIC s_clk PLL.
 * @rb: ioremapped register base of the adapter
 *
 * Holds the s_clk PLL in reset/bypass, programs it for the maximum clock
 * (mode-independent; firmware/NFC finalizes the PLL setup), keeps the
 * Ethernet subsystem clocks ungated during init, then writes the sclk
 * value and waits 1ms for lock.
 */
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropiately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init dont clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel((r32 | __ETH_CLK_ENABLE_PORT0),
	       (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel((r32 | __ETH_CLK_ENABLE_PORT1),
	       (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 * (0x1061731b is an opaque PLL configuration constant; only the
	 * lock/refclk/div2 bits are preserved from the current value)
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * Dont do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}
/**
 * bfa_ioc_ct2_lclk_init - initialize the CT2 ASIC l_clk PLL.
 * @rb: ioremapped register base of the adapter
 *
 * Mirrors bfa_ioc_ct2_sclk_init() for the l_clk: hold the PLL in
 * reset/bypass, program for FC16 speed (works for other modes), write the
 * lclk value and wait 1ms for lock.
 */
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 * NOTE(review): this read-then-write-back modifies no bits; either a
	 * dummy access required by hardware or dead code — confirm against
	 * the ASIC programming guide before touching.
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 * NOTE(review): likewise a no-op write-back of the value just read.
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 * (0x20c1731b is an opaque PLL configuration constant; only the
	 * lock/halfspeed bits are preserved from the current value)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}

/**
 * bfa_ioc_ct2_mem_init - release local memory and run the CT2 eDRAM BIST.
 * @rb: ioremapped register base of the adapter
 *
 * Takes LMEM out of reset, then pulses the eDRAM built-in self test for
 * 1ms.  The BIST result is not checked here.
 */
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	/* take local memory out of reset */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	/* start eDRAM BIST, let it run 1ms, then stop it */
	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}

/**
 * bfa_ioc_ct2_mac_reset - reset the CT2 MACs.
 * @rb: ioremapped register base of the adapter
 *
 * Re-runs the s_clk/l_clk PLL init, releases the logic soft resets on
 * both PLLs, then puts both ports' MAC and AHB blocks into reset (they
 * are left in reset on return).
 */
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	/* volatile: force each MMIO read to actually occur even though the
	 * values are only used transiently (original author's choice; readl
	 * itself is already not elidable).
	 */
	volatile u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       (rb + CT2_CSI_MAC_CONTROL_REG(0)));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       (rb + CT2_CSI_MAC_CONTROL_REG(1)));
}

/* Max iterations (1ms each) to wait for the NFC controller to halt. */
#define CT2_NFC_MAX_DELAY	1000
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	/* bfa_ioc_ct2_pll_init - bring up the CT2 ASIC PLLs.
	 * @rb:		ioremapped register base of the adapter
	 * @asic_mode:	unused on CT2; the PLLs are programmed for the max
	 *		clock and firmware/NFC finalizes per-mode setup
	 *
	 * Halts the NFC controller if it has not yet published the PF/VF
	 * config, masks/acks stale mailbox interrupts left by BIOS/EFI,
	 * resets the MACs and PLLs, runs the memory init/BIST and marks
	 * both IOC firmware states uninitialized.
	 *
	 * Always returns BFA_STATUS_OK.
	 */
	volatile u32 wgn, r32;
	int i;

	/*
	 * Initialize PLL if not already done by NFC
	 * NOTE(review): if the NFC never reports halted the loop just times
	 * out after CT2_NFC_MAX_DELAY ms and we proceed regardless.
	 */
	wgn = readl(rb + CT2_WGN_STATUS);
	if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */

	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For each LPU mailbox: a CMD_STAT of 1 means a stale command is
	 * pending; write 1 to ack it and read back to flush the posted
	 * write.
	 */
	r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
	if (r32 == 1) {
		writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
		readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
	}
	r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
	if (r32 == 1) {
		writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
		readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
	}

	/* NOTE(review): bfa_ioc_ct2_mac_reset() already runs the sclk/lclk
	 * init internally, so the two calls below repeat it (and leave both
	 * PLLs back in soft reset) — presumably intentional sequencing;
	 * confirm against the ASIC bring-up procedure.
	 */
	bfa_ioc_ct2_mac_reset(rb);
	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 * NOTE(review): this clears the *LCLK*-named soft-reset bit in the
	 * *SCLK* control register, and the LCLK register's soft reset is
	 * never released here. Looks like a copy/paste slip (compare
	 * bfa_ioc_ct2_mac_reset() above, which releases each register with
	 * its own bit) — harmless only if the two macros encode the same
	 * bit position. Verify against bfi_reg.h before changing.
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Announce flash device presence, if flash was corrupted.
	 * (WGN_READY without more bits set indicates the corrupted-flash
	 * case; drive GPIO 0 low and enable its output.)
	 */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl((rb + PSS_GPIO_OUT_REG));
		writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
		r32 = readl((rb + PSS_GPIO_OE_REG));
		writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
	}

	bfa_ioc_ct2_mem_init(rb);

	/* Both IOC firmware states start out uninitialized. */
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}