// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

#define bfa_ioc_ct_sync_pos(__ioc)      \
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH    16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)       (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
			(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
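
/*
 * Layout of the ioc_fail_sync register implied by the macros above:
 * bits [15:0] hold the per-PCI-function sync-acknowledged flags and
 * bits [31:16] hold the corresponding sync-required flags, one bit
 * per function in each half.
 */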

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);

static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;

/*
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * The use count cannot be non-zero while the chip is still in
	 * the uninitialized state.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}
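
/*
 * Note on the idiom above: ioc_usage_reg is a firmware use count shared
 * by all PCI functions, and ioc_usage_sem_reg (a hardware semaphore)
 * serializes its read-modify-write. The semaphore is released by reading
 * the register and then writing 1, the same convention documented in
 * bfa_ioc_ct_ownership_reset() below.
 */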

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/*
	 * decrement usage count
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	WARN_ON(usecnt == 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_trc(ioc, usecnt);

	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}

/*
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_is_cna(ioc)) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(~0U, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}
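
/*
 * The reads that follow each write above flush posted PCI writes, so
 * the halt (or err_set) request has actually reached the adapter before
 * this function returns.
 */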

/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
	ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};
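
/*
 * Note the indexing difference between the two ASIC generations: the CT
 * tables above (ct_fnreg, ct_p0reg, ct_p1reg) carry one entry per PCI
 * function, while ct2_reg carries one entry per port; compare how
 * bfa_ioc_ct_reg_init() and bfa_ioc_ct2_reg_init() below index them.
 */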

static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int		pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg: for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

static void
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int	port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg: for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/*
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
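
/*
 * Worked example of the decode above: each PCI function owns an 8-bit
 * field in FNC_PERS_REG, so for function 2 the register is shifted
 * right by 16 before the port-map bits are masked out with
 * __F0_PORT_MAP_MK.
 */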

static void
bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem	*rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}

static bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return BFA_TRUE;
	}

	return BFA_FALSE;
}

/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);

	writel(0, ioc->ioc_regs.ioc_fail_sync);
	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}

/*
 * Synchronized IOC failure processing routines
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
					bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
		ioc->ioc_regs.ioc_fail_sync);
}

static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
		!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}
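
/*
 * Concrete example of the handshake above: if PCI functions 0 and 1
 * have both joined, sync_reqd is 0x3. Failure processing is complete
 * only once sync_ackd also reads 0x3, at which point the ack half of
 * ioc_fail_sync is cleared and both fwstate registers are marked
 * BFI_IOC_FAIL.
 */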

/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
{
	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
	hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate;
	hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate;
	hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate;
	hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
}

/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);

	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	ioc->ioc_hwif = &hwif_ct;
}

/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
	hwif_ct2.ioc_isr_mode_set = NULL;
	ioc->ioc_hwif = &hwif_ct2;
}
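
/*
 * Illustrative sketch (not code from this file): a probe-time caller is
 * expected to pick the hwif by ASIC generation, along the lines of:
 *
 *	if (bfa_asic_id_ct2(ioc->pcidev.device_id))
 *		bfa_ioc_set_ct2_hwif(ioc);
 *	else
 *		bfa_ioc_set_ct_hwif(ioc);
 *
 * The bfa_asic_id_ct2() helper is assumed here; the actual device-id
 * checks live in bfa_ioc.h. Note also that CT2 leaves ioc_isr_mode_set
 * NULL, since the per-function INTX/MSIX personality programming done
 * by bfa_ioc_ct_isr_mode_set() is not needed on that ASIC.
 */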

/*
 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
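
/*
 * Arithmetic behind the workaround above: each PCI function is given
 * HOSTFN_MSIX_DEFAULT (64) vectors, so function N's vector range starts
 * at 64 * N and the NUMVT field is programmed with 64 - 1. If NUMVT is
 * already non-zero, only the mailbox-error vector index is rewritten
 * from the existing offset.
 */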

bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32	pll_sclk, pll_fclk, r32;
	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
			 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16).
	 * Firmware/NFC will do the PLL init appropriately.
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init don't clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}
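
/*
 * The constant 0x1061731b written above is presumably the divider and
 * control setting for the maximum (FC16) s_clk rate, per the "program
 * for the max clock" comment; the exact field encoding is not spelled
 * out in this file.
 */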

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32	r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}

static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(0));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(1));
}

static void
bfa_ioc_ct2_enable_flash(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_GPIO_OUT_REG));
	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
	r32 = readl((rb + PSS_GPIO_OE_REG));
	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
}
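
/*
 * bfa_ioc_ct2_enable_flash() drives GPIO pin 0 low (clearing bit 0 of
 * PSS_GPIO_OUT_REG) and then enables it as an output (setting bit 0 of
 * PSS_GPIO_OE_REG), which appears to force the flash part back on; see
 * the "If flash is corrupted, enable flash explicitly" note in
 * bfa_ioc_ct2_pll_init() below.
 */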

#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_PAUSE_MAX_DELAY 4000
#define CT2_NFC_VER_VALID	0x147
#define CT2_NFC_STATE_RUNNING   0x20000001
#define BFA_IOC_PLL_POLL	1000000

static bfa_boolean_t
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	u32	r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return BFA_TRUE;

	return BFA_FALSE;
}

static void
bfa_ioc_ct2_nfc_halt(void __iomem *rb)
{
	int	i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			break;
		udelay(1000);
	}
	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	u32	r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	WARN_ON(1);
}

static void
bfa_ioc_ct2_clk_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
			(rb + CT2_APP_PLL_SCLK_CTL_REG));

	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
			(rb + CT2_APP_PLL_LCLK_CTL_REG));
}

static void
bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
{
	u32 r32, i;

	r32 = readl((rb + PSS_CTL_REG));
	r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
	writel(r32, (rb + PSS_CTL_REG));

	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);

	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
	WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
}
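
/*
 * The two polling loops above bracket the PLL reset pulse: the first
 * waits for __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS to assert (the NFC
 * has started the reset), the second waits for it to clear (the reset
 * has finished), and the final read checks that the request bit in
 * CT2_CSI_FW_CTL_REG has been consumed.
 */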

static void
bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
{
	u32 r32;
	int i;

	if (bfa_ioc_ct2_nfc_halted(rb))
		bfa_ioc_ct2_nfc_resume(rb);
	for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_STS_REG);
		if (r32 == CT2_NFC_STATE_RUNNING)
			return;
		udelay(1000);
	}

	r32 = readl(rb + CT2_NFC_STS_REG);
	WARN_ON(r32 != CT2_NFC_STATE_RUNNING);
}

bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 wgn, r32, nfc_ver;

	wgn = readl(rb + CT2_WGN_STATUS);

	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		/*
		 * If flash is corrupted, enable flash explicitly
		 */
		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);

		bfa_ioc_ct2_mac_reset(rb);

		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);
	} else {
		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

		if ((nfc_ver >= CT2_NFC_VER_VALID) &&
		    (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
			bfa_ioc_ct2_wait_till_nfc_running(rb);

			bfa_ioc_ct2_nfc_clk_reset(rb);
		} else {
			bfa_ioc_ct2_nfc_halt(rb);

			bfa_ioc_ct2_clk_reset(rb);
			bfa_ioc_ct2_mac_reset(rb);
			bfa_ioc_ct2_clk_reset(rb);
		}
	}
	/*
	 * The very first PCIe DMA Read done by LPU fails with a fatal error
	 * when Address Translation Cache (ATC) has been enabled by system BIOS.
	 *
	 * Workaround:
	 * Disable Invalidated Tag Match Enable capability by setting bit 26
	 * of CHIP_MISC_PRG to 0; by default it is set to 1.
	 */
	r32 = readl(rb + CT2_CHIP_MISC_PRG);
	writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));

	return BFA_STATUS_OK;
}

static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
		enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
		enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}