/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

#define bfa_ioc_ct_sync_pos(__ioc)	\
		((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
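
/*
 * Layout of the ioc_fail_sync register used by the macros above: the low
 * 16 bits hold per-PCI-fn "sync acked" flags and the high 16 bits hold
 * per-PCI-fn "sync required" flags. For example, PCI fn 2 owns ack bit
 * 0x00000004 (bfa_ioc_ct_sync_pos) and request bit 0x00040000
 * (bfa_ioc_ct_sync_reqd_pos).
 */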

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

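/* Hardware interface ops for the CT (Catapult) ASIC. */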
static const struct bfa_ioc_hwif nw_hwif_ct = {
	.ioc_pll_init	     = bfa_ioc_ct_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init	     = bfa_ioc_ct_reg_init,
	.ioc_map_port	     = bfa_ioc_ct_map_port,
	.ioc_isr_mode_set    = bfa_ioc_ct_isr_mode_set,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start      = bfa_ioc_ct_sync_start,
	.ioc_sync_join       = bfa_ioc_ct_sync_join,
	.ioc_sync_leave	     = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack	     = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
};

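/*
 * Hardware interface ops for the CT2 ASIC. CT2 supplies its own register
 * map, port mapping and LPU read-status handling; .ioc_isr_mode_set is
 * left NULL, so no INTx/MSI-X mode switch is performed for CT2.
 */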
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
	.ioc_pll_init	     = bfa_ioc_ct2_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init	     = bfa_ioc_ct2_reg_init,
	.ioc_map_port	     = bfa_ioc_ct2_map_port,
	.ioc_lpu_read_stat   = bfa_ioc_ct2_lpu_read_stat,
	.ioc_isr_mode_set    = NULL,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start      = bfa_ioc_ct_sync_start,
	.ioc_sync_join       = bfa_ioc_ct_sync_join,
	.ioc_sync_leave	     = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack	     = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
};

/* Called from bfa_ioc_attach() to map ASIC-specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct2;
}
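
/*
 * A minimal caller sketch (hypothetical; the real selection is made at
 * attach time based on the PCI device id):
 *
 *	if (bfa_asic_id_ct2(pdev->device))
 *		bfa_nw_ioc_set_ct2_hwif(ioc);
 *	else
 *		bfa_nw_ioc_set_ct_hwif(ioc);
 */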

/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr fwhdr;

	/*
	 * If BIOS boot (flash based) -- do not increment usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return true;

	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return true.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		return true;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/*
	 * Use count cannot be non-zero while the chip is in
	 * uninitialized state.
	 */
	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		return false;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	return true;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
	u32 usecnt;

	/*
	 * If BIOS boot (flash based) -- do not decrement usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/*
	 * Decrement usage count
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	BUG_ON(usecnt == 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);

	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	/* Read back to flush the posted writes so the halt takes effect */
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);
}

/* Host to LPU mailbox message addresses, indexed by PCI function number */
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
} ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

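/* Host <-> LPU mailbox registers for CT2, indexed by port id */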
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
	u32	hfn;
	u32	lpu;
	u32	lpu_read;
} ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};

static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int		pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

	/*
	 * SRAM memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int		port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

	/*
	 * SRAM memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

/* Initialize IOC to port mapping. */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
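/*
 * Each PCI function owns an 8-bit slice of the personality register
 * FNC_PERS_REG; e.g. fn 3's fields start at bit 24. The port-map field
 * within that slice gives the port id for the function.
 */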
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}

/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

	writel(r32, rb + FNC_PERS_REG);
}

static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		/* writing 1 clears the LPU read-status bit */
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return true;
	}

	return false;
}

/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
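/*
 * HOSTFN_MSIX_VT_OFST_NUMVT packs the vector count minus one into bits
 * 21:11 (__MSIX_VT_NUMVT__MK) and the function's first vector index into
 * bits 10:0 (__MSIX_VT_OFST_). With the default of 64 vectors per
 * function, PCI fn 1 owns vectors 64..127 and the register is written
 * below as __MSIX_VT_NUMVT_(63) | 64.
 */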
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
			HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
			rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}

/* Synchronized IOC failure processing routines */
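/*
 * The handshake works on the ioc_fail_sync register: on failure each PCI
 * fn ORs in its sync-required bit (upper half) via sync_join, then ORs in
 * its sync-ack bit (lower half) via sync_ack. When the two halves match,
 * every joined fn has acknowledged and the IOC may be reinitialized.
 */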
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel(r32 | sync_pos, ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
					bfa_ioc_ct_sync_pos(ioc);

	writel(r32 & ~sync_msk, ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);
}

static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	if (sync_ackd == 0)
		return true;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
				ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel(r32 | sync_ackd, ioc->ioc_regs.ioc_fail_sync);

	return false;
}

static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32	pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL,
				(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
				(rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk |
		__APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
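
/*
 * CT2 PLL bring-up helpers: s_clk and l_clk are programmed for the
 * maximum rate (FC16) regardless of mode; firmware/NFC retunes the PLLs
 * as appropriate afterwards.
 */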

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init don't clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel((r32 | __ETH_CLK_ENABLE_PORT0),
				(rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel((r32 | __ETH_CLK_ENABLE_PORT1),
				(rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * Don't do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}

static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	volatile u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
			(rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
			(rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
			(rb + CT2_CSI_MAC_CONTROL_REG(0)));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
			(rb + CT2_CSI_MAC_CONTROL_REG(1)));
}

#define CT2_NFC_MAX_DELAY       1000
#define CT2_NFC_VER_VALID       0x143
#define BFA_IOC_PLL_POLL        1000000
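
/*
 * CT2_NFC_VER_VALID is the minimum NFC firmware version for which the
 * halt/resume handshake below is used; older NFC versions fall back to
 * the host-driven MAC/PLL reset path.
 */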

static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	volatile u32 r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return true;

	return false;
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	volatile u32 r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	BUG_ON(1);
}

static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	volatile u32 wgn, r32;
	u32 nfc_ver, i;

	wgn = readl(rb + CT2_WGN_STATUS);

	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
		(nfc_ver >= CT2_NFC_VER_VALID)) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			bfa_ioc_ct2_nfc_resume(rb);
		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
				rb + CT2_CSI_FW_CTL_SET_REG);

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
				break;
		}
		BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
				break;
		}
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
		udelay(1000);

		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
	} else {
		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}

		bfa_ioc_ct2_mac_reset(rb);
		bfa_ioc_ct2_sclk_init(rb);
		bfa_ioc_ct2_lclk_init(rb);

		/* release soft reset on s_clk & l_clk */
		r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
				rb + CT2_APP_PLL_SCLK_CTL_REG);
		r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
				rb + CT2_APP_PLL_LCLK_CTL_REG);
	}

	/* Announce flash device presence, if flash was corrupted. */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl((rb + PSS_GPIO_OUT_REG));
		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
		r32 = readl((rb + PSS_GPIO_OE_REG));
		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}