xref: /openbmc/linux/drivers/scsi/csiostor/csio_init.c (revision a3667aaed5698b84bad2f1b3f71adc86499f4bc6)
1*a3667aaeSNaresh Kumar Inna /*
2*a3667aaeSNaresh Kumar Inna  * This file is part of the Chelsio FCoE driver for Linux.
3*a3667aaeSNaresh Kumar Inna  *
4*a3667aaeSNaresh Kumar Inna  * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
5*a3667aaeSNaresh Kumar Inna  *
6*a3667aaeSNaresh Kumar Inna  * This software is available to you under a choice of one of two
7*a3667aaeSNaresh Kumar Inna  * licenses.  You may choose to be licensed under the terms of the GNU
8*a3667aaeSNaresh Kumar Inna  * General Public License (GPL) Version 2, available from the file
9*a3667aaeSNaresh Kumar Inna  * COPYING in the main directory of this source tree, or the
10*a3667aaeSNaresh Kumar Inna  * OpenIB.org BSD license below:
11*a3667aaeSNaresh Kumar Inna  *
12*a3667aaeSNaresh Kumar Inna  *     Redistribution and use in source and binary forms, with or
13*a3667aaeSNaresh Kumar Inna  *     without modification, are permitted provided that the following
14*a3667aaeSNaresh Kumar Inna  *     conditions are met:
15*a3667aaeSNaresh Kumar Inna  *
16*a3667aaeSNaresh Kumar Inna  *      - Redistributions of source code must retain the above
17*a3667aaeSNaresh Kumar Inna  *        copyright notice, this list of conditions and the following
18*a3667aaeSNaresh Kumar Inna  *        disclaimer.
19*a3667aaeSNaresh Kumar Inna  *
20*a3667aaeSNaresh Kumar Inna  *      - Redistributions in binary form must reproduce the above
21*a3667aaeSNaresh Kumar Inna  *        copyright notice, this list of conditions and the following
22*a3667aaeSNaresh Kumar Inna  *        disclaimer in the documentation and/or other materials
23*a3667aaeSNaresh Kumar Inna  *        provided with the distribution.
24*a3667aaeSNaresh Kumar Inna  *
25*a3667aaeSNaresh Kumar Inna  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26*a3667aaeSNaresh Kumar Inna  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27*a3667aaeSNaresh Kumar Inna  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28*a3667aaeSNaresh Kumar Inna  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29*a3667aaeSNaresh Kumar Inna  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30*a3667aaeSNaresh Kumar Inna  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31*a3667aaeSNaresh Kumar Inna  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32*a3667aaeSNaresh Kumar Inna  * SOFTWARE.
33*a3667aaeSNaresh Kumar Inna  */
34*a3667aaeSNaresh Kumar Inna 
35*a3667aaeSNaresh Kumar Inna #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36*a3667aaeSNaresh Kumar Inna 
37*a3667aaeSNaresh Kumar Inna #include <linux/kernel.h>
38*a3667aaeSNaresh Kumar Inna #include <linux/module.h>
39*a3667aaeSNaresh Kumar Inna #include <linux/init.h>
40*a3667aaeSNaresh Kumar Inna #include <linux/pci.h>
41*a3667aaeSNaresh Kumar Inna #include <linux/aer.h>
42*a3667aaeSNaresh Kumar Inna #include <linux/mm.h>
43*a3667aaeSNaresh Kumar Inna #include <linux/notifier.h>
44*a3667aaeSNaresh Kumar Inna #include <linux/kdebug.h>
45*a3667aaeSNaresh Kumar Inna #include <linux/seq_file.h>
46*a3667aaeSNaresh Kumar Inna #include <linux/debugfs.h>
47*a3667aaeSNaresh Kumar Inna #include <linux/string.h>
48*a3667aaeSNaresh Kumar Inna #include <linux/export.h>
49*a3667aaeSNaresh Kumar Inna 
50*a3667aaeSNaresh Kumar Inna #include "csio_init.h"
51*a3667aaeSNaresh Kumar Inna #include "csio_defs.h"
52*a3667aaeSNaresh Kumar Inna 
53*a3667aaeSNaresh Kumar Inna #define CSIO_MIN_MEMPOOL_SZ	64
54*a3667aaeSNaresh Kumar Inna 
55*a3667aaeSNaresh Kumar Inna static struct dentry *csio_debugfs_root;
56*a3667aaeSNaresh Kumar Inna 
57*a3667aaeSNaresh Kumar Inna static struct scsi_transport_template *csio_fcoe_transport;
58*a3667aaeSNaresh Kumar Inna static struct scsi_transport_template *csio_fcoe_transport_vport;
59*a3667aaeSNaresh Kumar Inna 
60*a3667aaeSNaresh Kumar Inna /*
61*a3667aaeSNaresh Kumar Inna  * debugfs support
62*a3667aaeSNaresh Kumar Inna  */
/*
 * csio_mem_open - debugfs open handler for the adapter memory files.
 *
 * Propagates the inode's private cookie (a csio_hw pointer with the
 * memory index encoded in its low bits, set up by csio_add_debugfs_mem())
 * to the file so csio_mem_read() can recover it.
 */
static int
csio_mem_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
69*a3667aaeSNaresh Kumar Inna 
/*
 * csio_mem_read - debugfs read handler for the adapter memory files.
 * @file:  debugfs file; private_data encodes hw pointer | memory index.
 * @buf:   user buffer.
 * @count: number of bytes requested.
 * @ppos:  file position, i.e. byte offset into the adapter memory.
 *
 * Reads the EDC0/EDC1/MC adapter memory in sizeof(data)-byte (64-byte)
 * chunks and copies the requested window to user space.  The readable
 * range is bounded by the file's i_size, which csio_add_debugfs_mem()
 * set to the memory region's size.
 */
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	/* i_size was set to the region size when the file was created */
	loff_t avail = file->f_path.dentry->d_inode->i_size;
	/* Low two bits of the cookie select MEM_EDC0/MEM_EDC1/MEM_MC */
	unsigned int mem = (uintptr_t)file->private_data & 3;
	/* Masking the index back off recovers the csio_hw pointer */
	struct csio_hw *hw = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;	/* EOF */
	if (count > avail - pos)
		count = avail - pos;	/* clamp request to the region */

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];	/* one 64-byte hardware read */

		if (mem == MEM_MC)
			ret = csio_hw_mc_read(hw, pos, data, NULL);
		else
			ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * NOTE(review): 'ofst' skips to the requested byte within
		 * the chunk, which assumes the MC/EDC helpers return the
		 * sizeof(data)-aligned chunk containing 'pos' — confirm
		 * against csio_hw_mc_read()/csio_hw_edc_read().
		 */
		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;	/* total bytes actually copied */
	*ppos = pos;
	return count;
}
110*a3667aaeSNaresh Kumar Inna 
/* File operations for the per-memory debugfs files ("edc0"/"edc1"/"mc"). */
static const struct file_operations csio_mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = csio_mem_open,
	.read    = csio_mem_read,
	.llseek  = default_llseek,
};
117*a3667aaeSNaresh Kumar Inna 
118*a3667aaeSNaresh Kumar Inna static void __devinit
119*a3667aaeSNaresh Kumar Inna csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
120*a3667aaeSNaresh Kumar Inna 		     unsigned int idx, unsigned int size_mb)
121*a3667aaeSNaresh Kumar Inna {
122*a3667aaeSNaresh Kumar Inna 	struct dentry *de;
123*a3667aaeSNaresh Kumar Inna 
124*a3667aaeSNaresh Kumar Inna 	de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
125*a3667aaeSNaresh Kumar Inna 				 (void *)hw + idx, &csio_mem_debugfs_fops);
126*a3667aaeSNaresh Kumar Inna 	if (de && de->d_inode)
127*a3667aaeSNaresh Kumar Inna 		de->d_inode->i_size = size_mb << 20;
128*a3667aaeSNaresh Kumar Inna }
129*a3667aaeSNaresh Kumar Inna 
130*a3667aaeSNaresh Kumar Inna static int __devinit
131*a3667aaeSNaresh Kumar Inna csio_setup_debugfs(struct csio_hw *hw)
132*a3667aaeSNaresh Kumar Inna {
133*a3667aaeSNaresh Kumar Inna 	int i;
134*a3667aaeSNaresh Kumar Inna 
135*a3667aaeSNaresh Kumar Inna 	if (IS_ERR_OR_NULL(hw->debugfs_root))
136*a3667aaeSNaresh Kumar Inna 		return -1;
137*a3667aaeSNaresh Kumar Inna 
138*a3667aaeSNaresh Kumar Inna 	i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
139*a3667aaeSNaresh Kumar Inna 	if (i & EDRAM0_ENABLE)
140*a3667aaeSNaresh Kumar Inna 		csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
141*a3667aaeSNaresh Kumar Inna 	if (i & EDRAM1_ENABLE)
142*a3667aaeSNaresh Kumar Inna 		csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
143*a3667aaeSNaresh Kumar Inna 	if (i & EXT_MEM_ENABLE)
144*a3667aaeSNaresh Kumar Inna 		csio_add_debugfs_mem(hw, "mc", MEM_MC,
145*a3667aaeSNaresh Kumar Inna 		      EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
146*a3667aaeSNaresh Kumar Inna 	return 0;
147*a3667aaeSNaresh Kumar Inna }
148*a3667aaeSNaresh Kumar Inna 
149*a3667aaeSNaresh Kumar Inna /*
150*a3667aaeSNaresh Kumar Inna  * csio_dfs_create - Creates and sets up per-hw debugfs.
151*a3667aaeSNaresh Kumar Inna  *
152*a3667aaeSNaresh Kumar Inna  */
153*a3667aaeSNaresh Kumar Inna static int
154*a3667aaeSNaresh Kumar Inna csio_dfs_create(struct csio_hw *hw)
155*a3667aaeSNaresh Kumar Inna {
156*a3667aaeSNaresh Kumar Inna 	if (csio_debugfs_root) {
157*a3667aaeSNaresh Kumar Inna 		hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
158*a3667aaeSNaresh Kumar Inna 							csio_debugfs_root);
159*a3667aaeSNaresh Kumar Inna 		csio_setup_debugfs(hw);
160*a3667aaeSNaresh Kumar Inna 	}
161*a3667aaeSNaresh Kumar Inna 
162*a3667aaeSNaresh Kumar Inna 	return 0;
163*a3667aaeSNaresh Kumar Inna }
164*a3667aaeSNaresh Kumar Inna 
165*a3667aaeSNaresh Kumar Inna /*
166*a3667aaeSNaresh Kumar Inna  * csio_dfs_destroy - Destroys per-hw debugfs.
167*a3667aaeSNaresh Kumar Inna  */
168*a3667aaeSNaresh Kumar Inna static int
169*a3667aaeSNaresh Kumar Inna csio_dfs_destroy(struct csio_hw *hw)
170*a3667aaeSNaresh Kumar Inna {
171*a3667aaeSNaresh Kumar Inna 	if (hw->debugfs_root)
172*a3667aaeSNaresh Kumar Inna 		debugfs_remove_recursive(hw->debugfs_root);
173*a3667aaeSNaresh Kumar Inna 
174*a3667aaeSNaresh Kumar Inna 	return 0;
175*a3667aaeSNaresh Kumar Inna }
176*a3667aaeSNaresh Kumar Inna 
/*
 * csio_dfs_init - Debug filesystem initialization for the module.
 *
 * Creates the driver's top-level debugfs directory.  Failure is not
 * fatal: the driver continues without debugfs support, hence the
 * unconditional 0 return.
 */
static int
csio_dfs_init(void)
{
	csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!csio_debugfs_root)
		pr_warn("Could not create debugfs entry, continuing\n");

	return 0;
}
190*a3667aaeSNaresh Kumar Inna 
/*
 * csio_dfs_exit - debugfs cleanup for the module.
 *
 * Removes the module-level debugfs directory; debugfs_remove() is a
 * no-op on a NULL dentry, so this is safe even if csio_dfs_init()
 * failed to create it.
 */
static void
csio_dfs_exit(void)
{
	debugfs_remove(csio_debugfs_root);
}
199*a3667aaeSNaresh Kumar Inna 
200*a3667aaeSNaresh Kumar Inna /*
201*a3667aaeSNaresh Kumar Inna  * csio_pci_init - PCI initialization.
202*a3667aaeSNaresh Kumar Inna  * @pdev: PCI device.
203*a3667aaeSNaresh Kumar Inna  * @bars: Bitmask of bars to be requested.
204*a3667aaeSNaresh Kumar Inna  *
205*a3667aaeSNaresh Kumar Inna  * Initializes the PCI function by enabling MMIO, setting bus
206*a3667aaeSNaresh Kumar Inna  * mastership and setting DMA mask.
207*a3667aaeSNaresh Kumar Inna  */
208*a3667aaeSNaresh Kumar Inna static int
209*a3667aaeSNaresh Kumar Inna csio_pci_init(struct pci_dev *pdev, int *bars)
210*a3667aaeSNaresh Kumar Inna {
211*a3667aaeSNaresh Kumar Inna 	int rv = -ENODEV;
212*a3667aaeSNaresh Kumar Inna 
213*a3667aaeSNaresh Kumar Inna 	*bars = pci_select_bars(pdev, IORESOURCE_MEM);
214*a3667aaeSNaresh Kumar Inna 
215*a3667aaeSNaresh Kumar Inna 	if (pci_enable_device_mem(pdev))
216*a3667aaeSNaresh Kumar Inna 		goto err;
217*a3667aaeSNaresh Kumar Inna 
218*a3667aaeSNaresh Kumar Inna 	if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
219*a3667aaeSNaresh Kumar Inna 		goto err_disable_device;
220*a3667aaeSNaresh Kumar Inna 
221*a3667aaeSNaresh Kumar Inna 	pci_set_master(pdev);
222*a3667aaeSNaresh Kumar Inna 	pci_try_set_mwi(pdev);
223*a3667aaeSNaresh Kumar Inna 
224*a3667aaeSNaresh Kumar Inna 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
225*a3667aaeSNaresh Kumar Inna 		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
226*a3667aaeSNaresh Kumar Inna 	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
227*a3667aaeSNaresh Kumar Inna 		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
228*a3667aaeSNaresh Kumar Inna 	} else {
229*a3667aaeSNaresh Kumar Inna 		dev_err(&pdev->dev, "No suitable DMA available.\n");
230*a3667aaeSNaresh Kumar Inna 		goto err_release_regions;
231*a3667aaeSNaresh Kumar Inna 	}
232*a3667aaeSNaresh Kumar Inna 
233*a3667aaeSNaresh Kumar Inna 	return 0;
234*a3667aaeSNaresh Kumar Inna 
235*a3667aaeSNaresh Kumar Inna err_release_regions:
236*a3667aaeSNaresh Kumar Inna 	pci_release_selected_regions(pdev, *bars);
237*a3667aaeSNaresh Kumar Inna err_disable_device:
238*a3667aaeSNaresh Kumar Inna 	pci_disable_device(pdev);
239*a3667aaeSNaresh Kumar Inna err:
240*a3667aaeSNaresh Kumar Inna 	return rv;
241*a3667aaeSNaresh Kumar Inna 
242*a3667aaeSNaresh Kumar Inna }
243*a3667aaeSNaresh Kumar Inna 
/*
 * csio_pci_exit - PCI uninitialization.
 * @pdev: PCI device.
 * @bars: Bars to be released.
 *
 * Undoes csio_pci_init(): releases the requested BAR regions and then
 * disables the device.
 */
static void
csio_pci_exit(struct pci_dev *pdev, int *bars)
{
	pci_release_selected_regions(pdev, *bars);
	pci_disable_device(pdev);
}
256*a3667aaeSNaresh Kumar Inna 
/*
 * csio_hw_init_workers - Initialize the HW module's worker threads.
 * @hw: HW module.
 *
 * Sets up the event-queue work item; it is scheduled elsewhere and
 * flushed/cancelled by csio_hw_exit_workers().
 */
static void
csio_hw_init_workers(struct csio_hw *hw)
{
	INIT_WORK(&hw->evtq_work, csio_evtq_worker);
}
267*a3667aaeSNaresh Kumar Inna 
/*
 * csio_hw_exit_workers - Tear down the HW module's worker threads.
 * @hw: HW module.
 *
 * Waits for an in-flight event worker to finish before returning.
 */
static void
csio_hw_exit_workers(struct csio_hw *hw)
{
	cancel_work_sync(&hw->evtq_work);
	/*
	 * NOTE(review): flush_scheduled_work() flushes the entire system
	 * workqueue; cancel_work_sync() above should already cover this
	 * driver's work item — confirm nothing else is scheduled before
	 * relying on (or removing) this call.
	 */
	flush_scheduled_work();
}
274*a3667aaeSNaresh Kumar Inna 
/*
 * csio_create_queues - Register the previously-allocated queues with FW.
 * @hw: HW module.
 *
 * Issues firmware commands to create the forward-interrupt IQ (non-MSIX
 * modes only), the FW event IQ, the management EQ, and one IQ/EQ pair
 * per port per CPU.  No-op if the queues were already created
 * (CSIO_HWF_Q_FW_ALLOCED set).  On later failures, all FW queues are
 * destroyed before returning.
 *
 * Returns 0 on success, a negative value otherwise.
 */
static int
csio_create_queues(struct csio_hw *hw)
{
	int i, j;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;
	struct csio_scsi_cpu_info *info;

	/* Queues already registered with the firmware? Nothing to do. */
	if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
		return 0;

	/*
	 * In INTx/MSI mode all queue interrupts are funnelled through a
	 * single forward-interrupt IQ; MSIX gives each IQ its own vector.
	 */
	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
					0, hw->pport[0].portid, false, NULL);
		if (rv != 0) {
			/*
			 * NOTE(review): the two early failures below return
			 * without destroying already-created FW queues —
			 * confirm teardown happens in the caller.
			 */
			csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
			return rv;
		}
	}

	/* FW event queue */
	rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
			       csio_get_fwevt_intr_idx(hw),
			       hw->pport[0].portid, true, NULL);
	if (rv != 0) {
		csio_err(hw, "FW event IQ config failed!: %d\n", rv);
		return rv;
	}

	/* Create mgmt queue */
	rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
			mgmtm->iq_idx, hw->pport[0].portid, NULL);

	if (rv != 0) {
		csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
		goto err;
	}

	/* Create SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < info->max_cpus; j++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];

			rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
					       sqset->intr_idx, i, false, NULL);
			if (rv != 0) {
				csio_err(hw,
				   "SCSI module IQ config failed [%d][%d]:%d\n",
				   i, j, rv);
				goto err;
			}
			rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
					       sqset->iq_idx, i, NULL);
			if (rv != 0) {
				csio_err(hw,
				   "SCSI module EQ config failed [%d][%d]:%d\n",
				   i, j, rv);
				goto err;
			}
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
	return 0;
err:
	/* Destroy all FW-side queues created so far */
	csio_wr_destroy_queues(hw, true);
	return -EINVAL;
}
345*a3667aaeSNaresh Kumar Inna 
/*
 * csio_config_queues - Configure the DMA queues.
 * @hw: HW module.
 *
 * Allocates host memory for all ingress/egress queues, registers them
 * with the firmware via csio_create_queues(), and requests IRQs.  If
 * the memory was already allocated (CSIO_HWF_Q_MEM_ALLOCED), only the
 * firmware registration step is redone.
 *
 * Returns 0 on success, -EINVAL otherwise.
 */
int
csio_config_queues(struct csio_hw *hw)
{
	int i, j, idx, k = 0;
	int rv;
	struct csio_scsi_qset *sqset;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_scsi_qset *orig;
	struct csio_scsi_cpu_info *info;

	/* Memory already allocated (e.g. after reset): only re-register */
	if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
		return csio_create_queues(hw);

	/* Calculate number of SCSI queues for MSIX we would like */
	hw->num_scsi_msix_cpus = num_online_cpus();
	hw->num_sqsets = num_online_cpus() * hw->num_pports;

	/* Cap at the hardware/driver limits */
	if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
		hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
		hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
	}

	/* Initialize max_cpus, may get reduced during msix allocations */
	for (i = 0; i < hw->num_pports; i++)
		hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;

	csio_dbg(hw, "nsqsets:%d scpus:%d\n",
		    hw->num_sqsets, hw->num_scsi_msix_cpus);

	/* May downgrade intr_mode and per-port max_cpus */
	csio_intr_enable(hw);

	if (hw->intr_mode != CSIO_IM_MSIX) {

		/* Allocate Forward interrupt iq. */
		hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
						CSIO_INTR_WRSIZE, CSIO_INGRESS,
						(void *)hw, 0, 0, NULL);
		if (hw->intr_iq_idx == -1) {
			csio_err(hw,
				 "Forward interrupt queue creation failed\n");
			goto intr_disable;
		}
	}

	/* Allocate the FW evt queue */
	hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
					   CSIO_FWEVT_WRSIZE,
					   CSIO_INGRESS, (void *)hw,
					   CSIO_FWEVT_FLBUFS, 0,
					   csio_fwevt_intx_handler);
	if (hw->fwevt_iq_idx == -1) {
		csio_err(hw, "FW evt queue creation failed\n");
		goto intr_disable;
	}

	/* Allocate the mgmt queue */
	mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
				      CSIO_MGMT_EQ_WRSIZE,
				      CSIO_EGRESS, (void *)hw, 0, 0, NULL);
	if (mgmtm->eq_idx == -1) {
		csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
		goto intr_disable;
	}

	/* Use FW IQ for MGMT req completion */
	mgmtm->iq_idx = hw->fwevt_iq_idx;

	/* Allocate SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			sqset = &hw->sqset[i][j];

			/*
			 * Fewer vectors than CPUs: reuse the qset of the
			 * CPU at (j mod max_cpus) instead of allocating.
			 */
			if (j >= info->max_cpus) {
				k = j % info->max_cpus;
				orig = &hw->sqset[i][k];
				sqset->eq_idx = orig->eq_idx;
				sqset->iq_idx = orig->iq_idx;
				continue;
			}

			idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
					      CSIO_EGRESS, (void *)hw, 0, 0,
					      NULL);
			if (idx == -1) {
				csio_err(hw, "EQ creation failed for idx:%d\n",
					    idx);
				goto intr_disable;
			}

			sqset->eq_idx = idx;

			idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
					     CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
					     (void *)hw, 0, 0,
					     csio_scsi_intx_handler);
			if (idx == -1) {
				csio_err(hw, "IQ creation failed for idx:%d\n",
					    idx);
				goto intr_disable;
			}
			sqset->iq_idx = idx;
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;

	/* Register the allocated queues with the firmware */
	rv = csio_create_queues(hw);
	if (rv != 0)
		goto intr_disable;

	/*
	 * Now request IRQs for the vectors. In the event of a failure,
	 * cleanup is handled internally by this function.
	 */
	rv = csio_request_irqs(hw);
	if (rv != 0)
		return -EINVAL;

	return 0;

intr_disable:
	csio_intr_disable(hw, false);

	return -EINVAL;
}
479*a3667aaeSNaresh Kumar Inna 
480*a3667aaeSNaresh Kumar Inna static int
481*a3667aaeSNaresh Kumar Inna csio_resource_alloc(struct csio_hw *hw)
482*a3667aaeSNaresh Kumar Inna {
483*a3667aaeSNaresh Kumar Inna 	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
484*a3667aaeSNaresh Kumar Inna 	int rv = -ENOMEM;
485*a3667aaeSNaresh Kumar Inna 
486*a3667aaeSNaresh Kumar Inna 	wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
487*a3667aaeSNaresh Kumar Inna 		       CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);
488*a3667aaeSNaresh Kumar Inna 
489*a3667aaeSNaresh Kumar Inna 	hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
490*a3667aaeSNaresh Kumar Inna 						  sizeof(struct csio_mb));
491*a3667aaeSNaresh Kumar Inna 	if (!hw->mb_mempool)
492*a3667aaeSNaresh Kumar Inna 		goto err;
493*a3667aaeSNaresh Kumar Inna 
494*a3667aaeSNaresh Kumar Inna 	hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
495*a3667aaeSNaresh Kumar Inna 						     sizeof(struct csio_rnode));
496*a3667aaeSNaresh Kumar Inna 	if (!hw->rnode_mempool)
497*a3667aaeSNaresh Kumar Inna 		goto err_free_mb_mempool;
498*a3667aaeSNaresh Kumar Inna 
499*a3667aaeSNaresh Kumar Inna 	hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
500*a3667aaeSNaresh Kumar Inna 					    CSIO_SCSI_RSP_LEN, 8, 0);
501*a3667aaeSNaresh Kumar Inna 	if (!hw->scsi_pci_pool)
502*a3667aaeSNaresh Kumar Inna 		goto err_free_rn_pool;
503*a3667aaeSNaresh Kumar Inna 
504*a3667aaeSNaresh Kumar Inna 	return 0;
505*a3667aaeSNaresh Kumar Inna 
506*a3667aaeSNaresh Kumar Inna err_free_rn_pool:
507*a3667aaeSNaresh Kumar Inna 	mempool_destroy(hw->rnode_mempool);
508*a3667aaeSNaresh Kumar Inna 	hw->rnode_mempool = NULL;
509*a3667aaeSNaresh Kumar Inna err_free_mb_mempool:
510*a3667aaeSNaresh Kumar Inna 	mempool_destroy(hw->mb_mempool);
511*a3667aaeSNaresh Kumar Inna 	hw->mb_mempool = NULL;
512*a3667aaeSNaresh Kumar Inna err:
513*a3667aaeSNaresh Kumar Inna 	return rv;
514*a3667aaeSNaresh Kumar Inna }
515*a3667aaeSNaresh Kumar Inna 
/*
 * csio_resource_free - Free the driver's memory and DMA pools.
 * @hw: The HW module.
 *
 * Reverse of csio_resource_alloc().  Pointers are cleared after each
 * destroy to guard against double free on repeated teardown.
 */
static void
csio_resource_free(struct csio_hw *hw)
{
	pci_pool_destroy(hw->scsi_pci_pool);
	hw->scsi_pci_pool = NULL;
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
}
526*a3667aaeSNaresh Kumar Inna 
527*a3667aaeSNaresh Kumar Inna /*
528*a3667aaeSNaresh Kumar Inna  * csio_hw_alloc - Allocate and initialize the HW module.
529*a3667aaeSNaresh Kumar Inna  * @pdev: PCI device.
530*a3667aaeSNaresh Kumar Inna  *
531*a3667aaeSNaresh Kumar Inna  * Allocates HW structure, DMA, memory resources, maps BARS to
532*a3667aaeSNaresh Kumar Inna  * host memory and initializes HW module.
533*a3667aaeSNaresh Kumar Inna  */
534*a3667aaeSNaresh Kumar Inna static struct csio_hw * __devinit
535*a3667aaeSNaresh Kumar Inna csio_hw_alloc(struct pci_dev *pdev)
536*a3667aaeSNaresh Kumar Inna {
537*a3667aaeSNaresh Kumar Inna 	struct csio_hw *hw;
538*a3667aaeSNaresh Kumar Inna 
539*a3667aaeSNaresh Kumar Inna 	hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
540*a3667aaeSNaresh Kumar Inna 	if (!hw)
541*a3667aaeSNaresh Kumar Inna 		goto err;
542*a3667aaeSNaresh Kumar Inna 
543*a3667aaeSNaresh Kumar Inna 	hw->pdev = pdev;
544*a3667aaeSNaresh Kumar Inna 	strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);
545*a3667aaeSNaresh Kumar Inna 
546*a3667aaeSNaresh Kumar Inna 	/* memory pool/DMA pool allocation */
547*a3667aaeSNaresh Kumar Inna 	if (csio_resource_alloc(hw))
548*a3667aaeSNaresh Kumar Inna 		goto err_free_hw;
549*a3667aaeSNaresh Kumar Inna 
550*a3667aaeSNaresh Kumar Inna 	/* Get the start address of registers from BAR 0 */
551*a3667aaeSNaresh Kumar Inna 	hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
552*a3667aaeSNaresh Kumar Inna 				       pci_resource_len(pdev, 0));
553*a3667aaeSNaresh Kumar Inna 	if (!hw->regstart) {
554*a3667aaeSNaresh Kumar Inna 		csio_err(hw, "Could not map BAR 0, regstart = %p\n",
555*a3667aaeSNaresh Kumar Inna 			 hw->regstart);
556*a3667aaeSNaresh Kumar Inna 		goto err_resource_free;
557*a3667aaeSNaresh Kumar Inna 	}
558*a3667aaeSNaresh Kumar Inna 
559*a3667aaeSNaresh Kumar Inna 	csio_hw_init_workers(hw);
560*a3667aaeSNaresh Kumar Inna 
561*a3667aaeSNaresh Kumar Inna 	if (csio_hw_init(hw))
562*a3667aaeSNaresh Kumar Inna 		goto err_unmap_bar;
563*a3667aaeSNaresh Kumar Inna 
564*a3667aaeSNaresh Kumar Inna 	csio_dfs_create(hw);
565*a3667aaeSNaresh Kumar Inna 
566*a3667aaeSNaresh Kumar Inna 	csio_dbg(hw, "hw:%p\n", hw);
567*a3667aaeSNaresh Kumar Inna 
568*a3667aaeSNaresh Kumar Inna 	return hw;
569*a3667aaeSNaresh Kumar Inna 
570*a3667aaeSNaresh Kumar Inna err_unmap_bar:
571*a3667aaeSNaresh Kumar Inna 	csio_hw_exit_workers(hw);
572*a3667aaeSNaresh Kumar Inna 	iounmap(hw->regstart);
573*a3667aaeSNaresh Kumar Inna err_resource_free:
574*a3667aaeSNaresh Kumar Inna 	csio_resource_free(hw);
575*a3667aaeSNaresh Kumar Inna err_free_hw:
576*a3667aaeSNaresh Kumar Inna 	kfree(hw);
577*a3667aaeSNaresh Kumar Inna err:
578*a3667aaeSNaresh Kumar Inna 	return NULL;
579*a3667aaeSNaresh Kumar Inna }
580*a3667aaeSNaresh Kumar Inna 
581*a3667aaeSNaresh Kumar Inna /*
582*a3667aaeSNaresh Kumar Inna  * csio_hw_free - Uninitialize and free the HW module.
583*a3667aaeSNaresh Kumar Inna  * @hw: The HW module
584*a3667aaeSNaresh Kumar Inna  *
585*a3667aaeSNaresh Kumar Inna  * Disable interrupts, uninit the HW module, free resources, free hw.
586*a3667aaeSNaresh Kumar Inna  */
587*a3667aaeSNaresh Kumar Inna static void
588*a3667aaeSNaresh Kumar Inna csio_hw_free(struct csio_hw *hw)
589*a3667aaeSNaresh Kumar Inna {
590*a3667aaeSNaresh Kumar Inna 	csio_intr_disable(hw, true);
591*a3667aaeSNaresh Kumar Inna 	csio_hw_exit_workers(hw);
592*a3667aaeSNaresh Kumar Inna 	csio_hw_exit(hw);
593*a3667aaeSNaresh Kumar Inna 	iounmap(hw->regstart);
594*a3667aaeSNaresh Kumar Inna 	csio_dfs_destroy(hw);
595*a3667aaeSNaresh Kumar Inna 	csio_resource_free(hw);
596*a3667aaeSNaresh Kumar Inna 	kfree(hw);
597*a3667aaeSNaresh Kumar Inna }
598*a3667aaeSNaresh Kumar Inna 
599*a3667aaeSNaresh Kumar Inna /**
600*a3667aaeSNaresh Kumar Inna  * csio_shost_init - Create and initialize the lnode module.
601*a3667aaeSNaresh Kumar Inna  * @hw:		The HW module.
602*a3667aaeSNaresh Kumar Inna  * @dev:	The device associated with this invocation.
603*a3667aaeSNaresh Kumar Inna  * @probe:	Called from probe context or not?
604*a3667aaeSNaresh Kumar Inna  * @os_pln:	Parent lnode if any.
605*a3667aaeSNaresh Kumar Inna  *
606*a3667aaeSNaresh Kumar Inna  * Allocates lnode structure via scsi_host_alloc, initializes
607*a3667aaeSNaresh Kumar Inna  * shost, initializes lnode module and registers with SCSI ML
608*a3667aaeSNaresh Kumar Inna  * via scsi_host_add. This function is shared between physical and
609*a3667aaeSNaresh Kumar Inna  * virtual node ports.
610*a3667aaeSNaresh Kumar Inna  */
611*a3667aaeSNaresh Kumar Inna struct csio_lnode *
612*a3667aaeSNaresh Kumar Inna csio_shost_init(struct csio_hw *hw, struct device *dev,
613*a3667aaeSNaresh Kumar Inna 		  bool probe, struct csio_lnode *pln)
614*a3667aaeSNaresh Kumar Inna {
615*a3667aaeSNaresh Kumar Inna 	struct Scsi_Host  *shost = NULL;
616*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *ln;
617*a3667aaeSNaresh Kumar Inna 
618*a3667aaeSNaresh Kumar Inna 	csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
619*a3667aaeSNaresh Kumar Inna 	csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;
620*a3667aaeSNaresh Kumar Inna 
621*a3667aaeSNaresh Kumar Inna 	/*
622*a3667aaeSNaresh Kumar Inna 	 * hw->pdev is the physical port's PCI dev structure,
623*a3667aaeSNaresh Kumar Inna 	 * which will be different from the NPIV dev structure.
624*a3667aaeSNaresh Kumar Inna 	 */
625*a3667aaeSNaresh Kumar Inna 	if (dev == &hw->pdev->dev)
626*a3667aaeSNaresh Kumar Inna 		shost = scsi_host_alloc(
627*a3667aaeSNaresh Kumar Inna 				&csio_fcoe_shost_template,
628*a3667aaeSNaresh Kumar Inna 				sizeof(struct csio_lnode));
629*a3667aaeSNaresh Kumar Inna 	else
630*a3667aaeSNaresh Kumar Inna 		shost = scsi_host_alloc(
631*a3667aaeSNaresh Kumar Inna 				&csio_fcoe_shost_vport_template,
632*a3667aaeSNaresh Kumar Inna 				sizeof(struct csio_lnode));
633*a3667aaeSNaresh Kumar Inna 
634*a3667aaeSNaresh Kumar Inna 	if (!shost)
635*a3667aaeSNaresh Kumar Inna 		goto err;
636*a3667aaeSNaresh Kumar Inna 
637*a3667aaeSNaresh Kumar Inna 	ln = shost_priv(shost);
638*a3667aaeSNaresh Kumar Inna 	memset(ln, 0, sizeof(struct csio_lnode));
639*a3667aaeSNaresh Kumar Inna 
640*a3667aaeSNaresh Kumar Inna 	/* Link common lnode to this lnode */
641*a3667aaeSNaresh Kumar Inna 	ln->dev_num = (shost->host_no << 16);
642*a3667aaeSNaresh Kumar Inna 
643*a3667aaeSNaresh Kumar Inna 	shost->can_queue = CSIO_MAX_QUEUE;
644*a3667aaeSNaresh Kumar Inna 	shost->this_id = -1;
645*a3667aaeSNaresh Kumar Inna 	shost->unique_id = shost->host_no;
646*a3667aaeSNaresh Kumar Inna 	shost->max_cmd_len = 16; /* Max CDB length supported */
647*a3667aaeSNaresh Kumar Inna 	shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
648*a3667aaeSNaresh Kumar Inna 			      hw->fres_info.max_ssns);
649*a3667aaeSNaresh Kumar Inna 	shost->max_lun = CSIO_MAX_LUN;
650*a3667aaeSNaresh Kumar Inna 	if (dev == &hw->pdev->dev)
651*a3667aaeSNaresh Kumar Inna 		shost->transportt = csio_fcoe_transport;
652*a3667aaeSNaresh Kumar Inna 	else
653*a3667aaeSNaresh Kumar Inna 		shost->transportt = csio_fcoe_transport_vport;
654*a3667aaeSNaresh Kumar Inna 
655*a3667aaeSNaresh Kumar Inna 	/* root lnode */
656*a3667aaeSNaresh Kumar Inna 	if (!hw->rln)
657*a3667aaeSNaresh Kumar Inna 		hw->rln = ln;
658*a3667aaeSNaresh Kumar Inna 
659*a3667aaeSNaresh Kumar Inna 	/* Other initialization here: Common, Transport specific */
660*a3667aaeSNaresh Kumar Inna 	if (csio_lnode_init(ln, hw, pln))
661*a3667aaeSNaresh Kumar Inna 		goto err_shost_put;
662*a3667aaeSNaresh Kumar Inna 
663*a3667aaeSNaresh Kumar Inna 	if (scsi_add_host(shost, dev))
664*a3667aaeSNaresh Kumar Inna 		goto err_lnode_exit;
665*a3667aaeSNaresh Kumar Inna 
666*a3667aaeSNaresh Kumar Inna 	return ln;
667*a3667aaeSNaresh Kumar Inna 
668*a3667aaeSNaresh Kumar Inna err_lnode_exit:
669*a3667aaeSNaresh Kumar Inna 	csio_lnode_exit(ln);
670*a3667aaeSNaresh Kumar Inna err_shost_put:
671*a3667aaeSNaresh Kumar Inna 	scsi_host_put(shost);
672*a3667aaeSNaresh Kumar Inna err:
673*a3667aaeSNaresh Kumar Inna 	return NULL;
674*a3667aaeSNaresh Kumar Inna }
675*a3667aaeSNaresh Kumar Inna 
676*a3667aaeSNaresh Kumar Inna /**
677*a3667aaeSNaresh Kumar Inna  * csio_shost_exit - De-instantiate the shost.
678*a3667aaeSNaresh Kumar Inna  * @ln:		The lnode module corresponding to the shost.
679*a3667aaeSNaresh Kumar Inna  *
680*a3667aaeSNaresh Kumar Inna  */
681*a3667aaeSNaresh Kumar Inna void
682*a3667aaeSNaresh Kumar Inna csio_shost_exit(struct csio_lnode *ln)
683*a3667aaeSNaresh Kumar Inna {
684*a3667aaeSNaresh Kumar Inna 	struct Scsi_Host *shost = csio_ln_to_shost(ln);
685*a3667aaeSNaresh Kumar Inna 	struct csio_hw *hw = csio_lnode_to_hw(ln);
686*a3667aaeSNaresh Kumar Inna 
687*a3667aaeSNaresh Kumar Inna 	/* Inform transport */
688*a3667aaeSNaresh Kumar Inna 	fc_remove_host(shost);
689*a3667aaeSNaresh Kumar Inna 
690*a3667aaeSNaresh Kumar Inna 	/* Inform SCSI ML */
691*a3667aaeSNaresh Kumar Inna 	scsi_remove_host(shost);
692*a3667aaeSNaresh Kumar Inna 
693*a3667aaeSNaresh Kumar Inna 	/* Flush all the events, so that any rnode removal events
694*a3667aaeSNaresh Kumar Inna 	 * already queued are all handled, before we remove the lnode.
695*a3667aaeSNaresh Kumar Inna 	 */
696*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
697*a3667aaeSNaresh Kumar Inna 	csio_evtq_flush(hw);
698*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
699*a3667aaeSNaresh Kumar Inna 
700*a3667aaeSNaresh Kumar Inna 	csio_lnode_exit(ln);
701*a3667aaeSNaresh Kumar Inna 	scsi_host_put(shost);
702*a3667aaeSNaresh Kumar Inna }
703*a3667aaeSNaresh Kumar Inna 
704*a3667aaeSNaresh Kumar Inna struct csio_lnode *
705*a3667aaeSNaresh Kumar Inna csio_lnode_alloc(struct csio_hw *hw)
706*a3667aaeSNaresh Kumar Inna {
707*a3667aaeSNaresh Kumar Inna 	return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
708*a3667aaeSNaresh Kumar Inna }
709*a3667aaeSNaresh Kumar Inna 
710*a3667aaeSNaresh Kumar Inna void
711*a3667aaeSNaresh Kumar Inna csio_lnodes_block_request(struct csio_hw *hw)
712*a3667aaeSNaresh Kumar Inna {
713*a3667aaeSNaresh Kumar Inna 	struct Scsi_Host  *shost;
714*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *sln;
715*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *ln;
716*a3667aaeSNaresh Kumar Inna 	struct list_head *cur_ln, *cur_cln;
717*a3667aaeSNaresh Kumar Inna 	struct csio_lnode **lnode_list;
718*a3667aaeSNaresh Kumar Inna 	int cur_cnt = 0, ii;
719*a3667aaeSNaresh Kumar Inna 
720*a3667aaeSNaresh Kumar Inna 	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
721*a3667aaeSNaresh Kumar Inna 			GFP_KERNEL);
722*a3667aaeSNaresh Kumar Inna 	if (!lnode_list) {
723*a3667aaeSNaresh Kumar Inna 		csio_err(hw, "Failed to allocate lnodes_list");
724*a3667aaeSNaresh Kumar Inna 		return;
725*a3667aaeSNaresh Kumar Inna 	}
726*a3667aaeSNaresh Kumar Inna 
727*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
728*a3667aaeSNaresh Kumar Inna 	/* Traverse sibling lnodes */
729*a3667aaeSNaresh Kumar Inna 	list_for_each(cur_ln, &hw->sln_head) {
730*a3667aaeSNaresh Kumar Inna 		sln = (struct csio_lnode *) cur_ln;
731*a3667aaeSNaresh Kumar Inna 		lnode_list[cur_cnt++] = sln;
732*a3667aaeSNaresh Kumar Inna 
733*a3667aaeSNaresh Kumar Inna 		/* Traverse children lnodes */
734*a3667aaeSNaresh Kumar Inna 		list_for_each(cur_cln, &sln->cln_head)
735*a3667aaeSNaresh Kumar Inna 			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
736*a3667aaeSNaresh Kumar Inna 	}
737*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
738*a3667aaeSNaresh Kumar Inna 
739*a3667aaeSNaresh Kumar Inna 	for (ii = 0; ii < cur_cnt; ii++) {
740*a3667aaeSNaresh Kumar Inna 		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
741*a3667aaeSNaresh Kumar Inna 		ln = lnode_list[ii];
742*a3667aaeSNaresh Kumar Inna 		shost = csio_ln_to_shost(ln);
743*a3667aaeSNaresh Kumar Inna 		scsi_block_requests(shost);
744*a3667aaeSNaresh Kumar Inna 
745*a3667aaeSNaresh Kumar Inna 	}
746*a3667aaeSNaresh Kumar Inna 	kfree(lnode_list);
747*a3667aaeSNaresh Kumar Inna }
748*a3667aaeSNaresh Kumar Inna 
749*a3667aaeSNaresh Kumar Inna void
750*a3667aaeSNaresh Kumar Inna csio_lnodes_unblock_request(struct csio_hw *hw)
751*a3667aaeSNaresh Kumar Inna {
752*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *ln;
753*a3667aaeSNaresh Kumar Inna 	struct Scsi_Host  *shost;
754*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *sln;
755*a3667aaeSNaresh Kumar Inna 	struct list_head *cur_ln, *cur_cln;
756*a3667aaeSNaresh Kumar Inna 	struct csio_lnode **lnode_list;
757*a3667aaeSNaresh Kumar Inna 	int cur_cnt = 0, ii;
758*a3667aaeSNaresh Kumar Inna 
759*a3667aaeSNaresh Kumar Inna 	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
760*a3667aaeSNaresh Kumar Inna 			GFP_KERNEL);
761*a3667aaeSNaresh Kumar Inna 	if (!lnode_list) {
762*a3667aaeSNaresh Kumar Inna 		csio_err(hw, "Failed to allocate lnodes_list");
763*a3667aaeSNaresh Kumar Inna 		return;
764*a3667aaeSNaresh Kumar Inna 	}
765*a3667aaeSNaresh Kumar Inna 
766*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
767*a3667aaeSNaresh Kumar Inna 	/* Traverse sibling lnodes */
768*a3667aaeSNaresh Kumar Inna 	list_for_each(cur_ln, &hw->sln_head) {
769*a3667aaeSNaresh Kumar Inna 		sln = (struct csio_lnode *) cur_ln;
770*a3667aaeSNaresh Kumar Inna 		lnode_list[cur_cnt++] = sln;
771*a3667aaeSNaresh Kumar Inna 
772*a3667aaeSNaresh Kumar Inna 		/* Traverse children lnodes */
773*a3667aaeSNaresh Kumar Inna 		list_for_each(cur_cln, &sln->cln_head)
774*a3667aaeSNaresh Kumar Inna 			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
775*a3667aaeSNaresh Kumar Inna 	}
776*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
777*a3667aaeSNaresh Kumar Inna 
778*a3667aaeSNaresh Kumar Inna 	for (ii = 0; ii < cur_cnt; ii++) {
779*a3667aaeSNaresh Kumar Inna 		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
780*a3667aaeSNaresh Kumar Inna 		ln = lnode_list[ii];
781*a3667aaeSNaresh Kumar Inna 		shost = csio_ln_to_shost(ln);
782*a3667aaeSNaresh Kumar Inna 		scsi_unblock_requests(shost);
783*a3667aaeSNaresh Kumar Inna 	}
784*a3667aaeSNaresh Kumar Inna 	kfree(lnode_list);
785*a3667aaeSNaresh Kumar Inna }
786*a3667aaeSNaresh Kumar Inna 
787*a3667aaeSNaresh Kumar Inna void
788*a3667aaeSNaresh Kumar Inna csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
789*a3667aaeSNaresh Kumar Inna {
790*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *ln;
791*a3667aaeSNaresh Kumar Inna 	struct Scsi_Host  *shost;
792*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *sln;
793*a3667aaeSNaresh Kumar Inna 	struct list_head *cur_ln, *cur_cln;
794*a3667aaeSNaresh Kumar Inna 	struct csio_lnode **lnode_list;
795*a3667aaeSNaresh Kumar Inna 	int cur_cnt = 0, ii;
796*a3667aaeSNaresh Kumar Inna 
797*a3667aaeSNaresh Kumar Inna 	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
798*a3667aaeSNaresh Kumar Inna 			GFP_KERNEL);
799*a3667aaeSNaresh Kumar Inna 	if (!lnode_list) {
800*a3667aaeSNaresh Kumar Inna 		csio_err(hw, "Failed to allocate lnodes_list");
801*a3667aaeSNaresh Kumar Inna 		return;
802*a3667aaeSNaresh Kumar Inna 	}
803*a3667aaeSNaresh Kumar Inna 
804*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
805*a3667aaeSNaresh Kumar Inna 	/* Traverse sibling lnodes */
806*a3667aaeSNaresh Kumar Inna 	list_for_each(cur_ln, &hw->sln_head) {
807*a3667aaeSNaresh Kumar Inna 		sln = (struct csio_lnode *) cur_ln;
808*a3667aaeSNaresh Kumar Inna 		if (sln->portid != portid)
809*a3667aaeSNaresh Kumar Inna 			continue;
810*a3667aaeSNaresh Kumar Inna 
811*a3667aaeSNaresh Kumar Inna 		lnode_list[cur_cnt++] = sln;
812*a3667aaeSNaresh Kumar Inna 
813*a3667aaeSNaresh Kumar Inna 		/* Traverse children lnodes */
814*a3667aaeSNaresh Kumar Inna 		list_for_each(cur_cln, &sln->cln_head)
815*a3667aaeSNaresh Kumar Inna 			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
816*a3667aaeSNaresh Kumar Inna 	}
817*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
818*a3667aaeSNaresh Kumar Inna 
819*a3667aaeSNaresh Kumar Inna 	for (ii = 0; ii < cur_cnt; ii++) {
820*a3667aaeSNaresh Kumar Inna 		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
821*a3667aaeSNaresh Kumar Inna 		ln = lnode_list[ii];
822*a3667aaeSNaresh Kumar Inna 		shost = csio_ln_to_shost(ln);
823*a3667aaeSNaresh Kumar Inna 		scsi_block_requests(shost);
824*a3667aaeSNaresh Kumar Inna 	}
825*a3667aaeSNaresh Kumar Inna 	kfree(lnode_list);
826*a3667aaeSNaresh Kumar Inna }
827*a3667aaeSNaresh Kumar Inna 
828*a3667aaeSNaresh Kumar Inna void
829*a3667aaeSNaresh Kumar Inna csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
830*a3667aaeSNaresh Kumar Inna {
831*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *ln;
832*a3667aaeSNaresh Kumar Inna 	struct Scsi_Host  *shost;
833*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *sln;
834*a3667aaeSNaresh Kumar Inna 	struct list_head *cur_ln, *cur_cln;
835*a3667aaeSNaresh Kumar Inna 	struct csio_lnode **lnode_list;
836*a3667aaeSNaresh Kumar Inna 	int cur_cnt = 0, ii;
837*a3667aaeSNaresh Kumar Inna 
838*a3667aaeSNaresh Kumar Inna 	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
839*a3667aaeSNaresh Kumar Inna 			GFP_KERNEL);
840*a3667aaeSNaresh Kumar Inna 	if (!lnode_list) {
841*a3667aaeSNaresh Kumar Inna 		csio_err(hw, "Failed to allocate lnodes_list");
842*a3667aaeSNaresh Kumar Inna 		return;
843*a3667aaeSNaresh Kumar Inna 	}
844*a3667aaeSNaresh Kumar Inna 
845*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
846*a3667aaeSNaresh Kumar Inna 	/* Traverse sibling lnodes */
847*a3667aaeSNaresh Kumar Inna 	list_for_each(cur_ln, &hw->sln_head) {
848*a3667aaeSNaresh Kumar Inna 		sln = (struct csio_lnode *) cur_ln;
849*a3667aaeSNaresh Kumar Inna 		if (sln->portid != portid)
850*a3667aaeSNaresh Kumar Inna 			continue;
851*a3667aaeSNaresh Kumar Inna 		lnode_list[cur_cnt++] = sln;
852*a3667aaeSNaresh Kumar Inna 
853*a3667aaeSNaresh Kumar Inna 		/* Traverse children lnodes */
854*a3667aaeSNaresh Kumar Inna 		list_for_each(cur_cln, &sln->cln_head)
855*a3667aaeSNaresh Kumar Inna 			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
856*a3667aaeSNaresh Kumar Inna 	}
857*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
858*a3667aaeSNaresh Kumar Inna 
859*a3667aaeSNaresh Kumar Inna 	for (ii = 0; ii < cur_cnt; ii++) {
860*a3667aaeSNaresh Kumar Inna 		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
861*a3667aaeSNaresh Kumar Inna 		ln = lnode_list[ii];
862*a3667aaeSNaresh Kumar Inna 		shost = csio_ln_to_shost(ln);
863*a3667aaeSNaresh Kumar Inna 		scsi_unblock_requests(shost);
864*a3667aaeSNaresh Kumar Inna 	}
865*a3667aaeSNaresh Kumar Inna 	kfree(lnode_list);
866*a3667aaeSNaresh Kumar Inna }
867*a3667aaeSNaresh Kumar Inna 
868*a3667aaeSNaresh Kumar Inna void
869*a3667aaeSNaresh Kumar Inna csio_lnodes_exit(struct csio_hw *hw, bool npiv)
870*a3667aaeSNaresh Kumar Inna {
871*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *sln;
872*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *ln;
873*a3667aaeSNaresh Kumar Inna 	struct list_head *cur_ln, *cur_cln;
874*a3667aaeSNaresh Kumar Inna 	struct csio_lnode **lnode_list;
875*a3667aaeSNaresh Kumar Inna 	int cur_cnt = 0, ii;
876*a3667aaeSNaresh Kumar Inna 
877*a3667aaeSNaresh Kumar Inna 	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
878*a3667aaeSNaresh Kumar Inna 			GFP_KERNEL);
879*a3667aaeSNaresh Kumar Inna 	if (!lnode_list) {
880*a3667aaeSNaresh Kumar Inna 		csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
881*a3667aaeSNaresh Kumar Inna 		return;
882*a3667aaeSNaresh Kumar Inna 	}
883*a3667aaeSNaresh Kumar Inna 
884*a3667aaeSNaresh Kumar Inna 	/* Get all child lnodes(NPIV ports) */
885*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
886*a3667aaeSNaresh Kumar Inna 	list_for_each(cur_ln, &hw->sln_head) {
887*a3667aaeSNaresh Kumar Inna 		sln = (struct csio_lnode *) cur_ln;
888*a3667aaeSNaresh Kumar Inna 
889*a3667aaeSNaresh Kumar Inna 		/* Traverse children lnodes */
890*a3667aaeSNaresh Kumar Inna 		list_for_each(cur_cln, &sln->cln_head)
891*a3667aaeSNaresh Kumar Inna 			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
892*a3667aaeSNaresh Kumar Inna 	}
893*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
894*a3667aaeSNaresh Kumar Inna 
895*a3667aaeSNaresh Kumar Inna 	/* Delete NPIV lnodes */
896*a3667aaeSNaresh Kumar Inna 	for (ii = 0; ii < cur_cnt; ii++) {
897*a3667aaeSNaresh Kumar Inna 		csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
898*a3667aaeSNaresh Kumar Inna 		ln = lnode_list[ii];
899*a3667aaeSNaresh Kumar Inna 		fc_vport_terminate(ln->fc_vport);
900*a3667aaeSNaresh Kumar Inna 	}
901*a3667aaeSNaresh Kumar Inna 
902*a3667aaeSNaresh Kumar Inna 	/* Delete only npiv lnodes */
903*a3667aaeSNaresh Kumar Inna 	if (npiv)
904*a3667aaeSNaresh Kumar Inna 		goto free_lnodes;
905*a3667aaeSNaresh Kumar Inna 
906*a3667aaeSNaresh Kumar Inna 	cur_cnt = 0;
907*a3667aaeSNaresh Kumar Inna 	/* Get all physical lnodes */
908*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
909*a3667aaeSNaresh Kumar Inna 	/* Traverse sibling lnodes */
910*a3667aaeSNaresh Kumar Inna 	list_for_each(cur_ln, &hw->sln_head) {
911*a3667aaeSNaresh Kumar Inna 		sln = (struct csio_lnode *) cur_ln;
912*a3667aaeSNaresh Kumar Inna 		lnode_list[cur_cnt++] = sln;
913*a3667aaeSNaresh Kumar Inna 	}
914*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
915*a3667aaeSNaresh Kumar Inna 
916*a3667aaeSNaresh Kumar Inna 	/* Delete physical lnodes */
917*a3667aaeSNaresh Kumar Inna 	for (ii = 0; ii < cur_cnt; ii++) {
918*a3667aaeSNaresh Kumar Inna 		csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
919*a3667aaeSNaresh Kumar Inna 		csio_shost_exit(lnode_list[ii]);
920*a3667aaeSNaresh Kumar Inna 	}
921*a3667aaeSNaresh Kumar Inna 
922*a3667aaeSNaresh Kumar Inna free_lnodes:
923*a3667aaeSNaresh Kumar Inna 	kfree(lnode_list);
924*a3667aaeSNaresh Kumar Inna }
925*a3667aaeSNaresh Kumar Inna 
926*a3667aaeSNaresh Kumar Inna /*
927*a3667aaeSNaresh Kumar Inna  * csio_lnode_init_post: Set lnode attributes after starting HW.
928*a3667aaeSNaresh Kumar Inna  * @ln: lnode.
929*a3667aaeSNaresh Kumar Inna  *
930*a3667aaeSNaresh Kumar Inna  */
931*a3667aaeSNaresh Kumar Inna static void
932*a3667aaeSNaresh Kumar Inna csio_lnode_init_post(struct csio_lnode *ln)
933*a3667aaeSNaresh Kumar Inna {
934*a3667aaeSNaresh Kumar Inna 	struct Scsi_Host  *shost = csio_ln_to_shost(ln);
935*a3667aaeSNaresh Kumar Inna 
936*a3667aaeSNaresh Kumar Inna 	csio_fchost_attr_init(ln);
937*a3667aaeSNaresh Kumar Inna 
938*a3667aaeSNaresh Kumar Inna 	scsi_scan_host(shost);
939*a3667aaeSNaresh Kumar Inna }
940*a3667aaeSNaresh Kumar Inna 
941*a3667aaeSNaresh Kumar Inna /*
942*a3667aaeSNaresh Kumar Inna  * csio_probe_one - Instantiate this function.
943*a3667aaeSNaresh Kumar Inna  * @pdev: PCI device
944*a3667aaeSNaresh Kumar Inna  * @id: Device ID
945*a3667aaeSNaresh Kumar Inna  *
946*a3667aaeSNaresh Kumar Inna  * This is the .probe() callback of the driver. This function:
947*a3667aaeSNaresh Kumar Inna  * - Initializes the PCI function by enabling MMIO, setting bus
948*a3667aaeSNaresh Kumar Inna  *   mastership and setting DMA mask.
949*a3667aaeSNaresh Kumar Inna  * - Allocates HW structure, DMA, memory resources, maps BARS to
950*a3667aaeSNaresh Kumar Inna  *   host memory and initializes HW module.
951*a3667aaeSNaresh Kumar Inna  * - Allocates lnode structure via scsi_host_alloc, initializes
952*a3667aaeSNaresh Kumar Inna  *   shost, initialized lnode module and registers with SCSI ML
953*a3667aaeSNaresh Kumar Inna  *   via scsi_host_add.
954*a3667aaeSNaresh Kumar Inna  * - Enables interrupts, and starts the chip by kicking off the
955*a3667aaeSNaresh Kumar Inna  *   HW state machine.
956*a3667aaeSNaresh Kumar Inna  * - Once hardware is ready, initiated scan of the host via
957*a3667aaeSNaresh Kumar Inna  *   scsi_scan_host.
958*a3667aaeSNaresh Kumar Inna  */
959*a3667aaeSNaresh Kumar Inna static int __devinit
960*a3667aaeSNaresh Kumar Inna csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
961*a3667aaeSNaresh Kumar Inna {
962*a3667aaeSNaresh Kumar Inna 	int rv;
963*a3667aaeSNaresh Kumar Inna 	int bars;
964*a3667aaeSNaresh Kumar Inna 	int i;
965*a3667aaeSNaresh Kumar Inna 	struct csio_hw *hw;
966*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *ln;
967*a3667aaeSNaresh Kumar Inna 
968*a3667aaeSNaresh Kumar Inna 	rv = csio_pci_init(pdev, &bars);
969*a3667aaeSNaresh Kumar Inna 	if (rv)
970*a3667aaeSNaresh Kumar Inna 		goto err;
971*a3667aaeSNaresh Kumar Inna 
972*a3667aaeSNaresh Kumar Inna 	hw = csio_hw_alloc(pdev);
973*a3667aaeSNaresh Kumar Inna 	if (!hw) {
974*a3667aaeSNaresh Kumar Inna 		rv = -ENODEV;
975*a3667aaeSNaresh Kumar Inna 		goto err_pci_exit;
976*a3667aaeSNaresh Kumar Inna 	}
977*a3667aaeSNaresh Kumar Inna 
978*a3667aaeSNaresh Kumar Inna 	pci_set_drvdata(pdev, hw);
979*a3667aaeSNaresh Kumar Inna 
980*a3667aaeSNaresh Kumar Inna 	if (csio_hw_start(hw) != 0) {
981*a3667aaeSNaresh Kumar Inna 		dev_err(&pdev->dev,
982*a3667aaeSNaresh Kumar Inna 			"Failed to start FW, continuing in debug mode.\n");
983*a3667aaeSNaresh Kumar Inna 		return 0;
984*a3667aaeSNaresh Kumar Inna 	}
985*a3667aaeSNaresh Kumar Inna 
986*a3667aaeSNaresh Kumar Inna 	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
987*a3667aaeSNaresh Kumar Inna 		    FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
988*a3667aaeSNaresh Kumar Inna 		    FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
989*a3667aaeSNaresh Kumar Inna 		    FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
990*a3667aaeSNaresh Kumar Inna 		    FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
991*a3667aaeSNaresh Kumar Inna 
992*a3667aaeSNaresh Kumar Inna 	for (i = 0; i < hw->num_pports; i++) {
993*a3667aaeSNaresh Kumar Inna 		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
994*a3667aaeSNaresh Kumar Inna 		if (!ln) {
995*a3667aaeSNaresh Kumar Inna 			rv = -ENODEV;
996*a3667aaeSNaresh Kumar Inna 			break;
997*a3667aaeSNaresh Kumar Inna 		}
998*a3667aaeSNaresh Kumar Inna 		/* Initialize portid */
999*a3667aaeSNaresh Kumar Inna 		ln->portid = hw->pport[i].portid;
1000*a3667aaeSNaresh Kumar Inna 
1001*a3667aaeSNaresh Kumar Inna 		spin_lock_irq(&hw->lock);
1002*a3667aaeSNaresh Kumar Inna 		if (csio_lnode_start(ln) != 0)
1003*a3667aaeSNaresh Kumar Inna 			rv = -ENODEV;
1004*a3667aaeSNaresh Kumar Inna 		spin_unlock_irq(&hw->lock);
1005*a3667aaeSNaresh Kumar Inna 
1006*a3667aaeSNaresh Kumar Inna 		if (rv)
1007*a3667aaeSNaresh Kumar Inna 			break;
1008*a3667aaeSNaresh Kumar Inna 
1009*a3667aaeSNaresh Kumar Inna 		csio_lnode_init_post(ln);
1010*a3667aaeSNaresh Kumar Inna 	}
1011*a3667aaeSNaresh Kumar Inna 
1012*a3667aaeSNaresh Kumar Inna 	if (rv)
1013*a3667aaeSNaresh Kumar Inna 		goto err_lnode_exit;
1014*a3667aaeSNaresh Kumar Inna 
1015*a3667aaeSNaresh Kumar Inna 	return 0;
1016*a3667aaeSNaresh Kumar Inna 
1017*a3667aaeSNaresh Kumar Inna err_lnode_exit:
1018*a3667aaeSNaresh Kumar Inna 	csio_lnodes_block_request(hw);
1019*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
1020*a3667aaeSNaresh Kumar Inna 	csio_hw_stop(hw);
1021*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
1022*a3667aaeSNaresh Kumar Inna 	csio_lnodes_unblock_request(hw);
1023*a3667aaeSNaresh Kumar Inna 	pci_set_drvdata(hw->pdev, NULL);
1024*a3667aaeSNaresh Kumar Inna 	csio_lnodes_exit(hw, 0);
1025*a3667aaeSNaresh Kumar Inna 	csio_hw_free(hw);
1026*a3667aaeSNaresh Kumar Inna err_pci_exit:
1027*a3667aaeSNaresh Kumar Inna 	csio_pci_exit(pdev, &bars);
1028*a3667aaeSNaresh Kumar Inna err:
1029*a3667aaeSNaresh Kumar Inna 	dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
1030*a3667aaeSNaresh Kumar Inna 	return rv;
1031*a3667aaeSNaresh Kumar Inna }
1032*a3667aaeSNaresh Kumar Inna 
1033*a3667aaeSNaresh Kumar Inna /*
1034*a3667aaeSNaresh Kumar Inna  * csio_remove_one - Remove one instance of the driver at this PCI function.
1035*a3667aaeSNaresh Kumar Inna  * @pdev: PCI device
1036*a3667aaeSNaresh Kumar Inna  *
1037*a3667aaeSNaresh Kumar Inna  * Used during hotplug operation.
1038*a3667aaeSNaresh Kumar Inna  */
1039*a3667aaeSNaresh Kumar Inna static void __devexit
1040*a3667aaeSNaresh Kumar Inna csio_remove_one(struct pci_dev *pdev)
1041*a3667aaeSNaresh Kumar Inna {
1042*a3667aaeSNaresh Kumar Inna 	struct csio_hw *hw = pci_get_drvdata(pdev);
1043*a3667aaeSNaresh Kumar Inna 	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
1044*a3667aaeSNaresh Kumar Inna 
1045*a3667aaeSNaresh Kumar Inna 	csio_lnodes_block_request(hw);
1046*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
1047*a3667aaeSNaresh Kumar Inna 
1048*a3667aaeSNaresh Kumar Inna 	/* Stops lnode, Rnode s/m
1049*a3667aaeSNaresh Kumar Inna 	 * Quiesce IOs.
1050*a3667aaeSNaresh Kumar Inna 	 * All sessions with remote ports are unregistered.
1051*a3667aaeSNaresh Kumar Inna 	 */
1052*a3667aaeSNaresh Kumar Inna 	csio_hw_stop(hw);
1053*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
1054*a3667aaeSNaresh Kumar Inna 	csio_lnodes_unblock_request(hw);
1055*a3667aaeSNaresh Kumar Inna 
1056*a3667aaeSNaresh Kumar Inna 	csio_lnodes_exit(hw, 0);
1057*a3667aaeSNaresh Kumar Inna 	csio_hw_free(hw);
1058*a3667aaeSNaresh Kumar Inna 	pci_set_drvdata(pdev, NULL);
1059*a3667aaeSNaresh Kumar Inna 	csio_pci_exit(pdev, &bars);
1060*a3667aaeSNaresh Kumar Inna }
1061*a3667aaeSNaresh Kumar Inna 
1062*a3667aaeSNaresh Kumar Inna /*
1063*a3667aaeSNaresh Kumar Inna  * csio_pci_error_detected - PCI error was detected
1064*a3667aaeSNaresh Kumar Inna  * @pdev: PCI device
1065*a3667aaeSNaresh Kumar Inna  *
1066*a3667aaeSNaresh Kumar Inna  */
1067*a3667aaeSNaresh Kumar Inna static pci_ers_result_t
1068*a3667aaeSNaresh Kumar Inna csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
1069*a3667aaeSNaresh Kumar Inna {
1070*a3667aaeSNaresh Kumar Inna 	struct csio_hw *hw = pci_get_drvdata(pdev);
1071*a3667aaeSNaresh Kumar Inna 
1072*a3667aaeSNaresh Kumar Inna 	csio_lnodes_block_request(hw);
1073*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
1074*a3667aaeSNaresh Kumar Inna 
1075*a3667aaeSNaresh Kumar Inna 	/* Post PCI error detected evt to HW s/m
1076*a3667aaeSNaresh Kumar Inna 	 * HW s/m handles this evt by quiescing IOs, unregisters rports
1077*a3667aaeSNaresh Kumar Inna 	 * and finally takes the device to offline.
1078*a3667aaeSNaresh Kumar Inna 	 */
1079*a3667aaeSNaresh Kumar Inna 	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
1080*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
1081*a3667aaeSNaresh Kumar Inna 	csio_lnodes_unblock_request(hw);
1082*a3667aaeSNaresh Kumar Inna 	csio_lnodes_exit(hw, 0);
1083*a3667aaeSNaresh Kumar Inna 	csio_intr_disable(hw, true);
1084*a3667aaeSNaresh Kumar Inna 	pci_disable_device(pdev);
1085*a3667aaeSNaresh Kumar Inna 	return state == pci_channel_io_perm_failure ?
1086*a3667aaeSNaresh Kumar Inna 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
1087*a3667aaeSNaresh Kumar Inna }
1088*a3667aaeSNaresh Kumar Inna 
1089*a3667aaeSNaresh Kumar Inna /*
1090*a3667aaeSNaresh Kumar Inna  * csio_pci_slot_reset - PCI slot has been reset.
1091*a3667aaeSNaresh Kumar Inna  * @pdev: PCI device
1092*a3667aaeSNaresh Kumar Inna  *
1093*a3667aaeSNaresh Kumar Inna  */
1094*a3667aaeSNaresh Kumar Inna static pci_ers_result_t
1095*a3667aaeSNaresh Kumar Inna csio_pci_slot_reset(struct pci_dev *pdev)
1096*a3667aaeSNaresh Kumar Inna {
1097*a3667aaeSNaresh Kumar Inna 	struct csio_hw *hw = pci_get_drvdata(pdev);
1098*a3667aaeSNaresh Kumar Inna 	int ready;
1099*a3667aaeSNaresh Kumar Inna 
1100*a3667aaeSNaresh Kumar Inna 	if (pci_enable_device(pdev)) {
1101*a3667aaeSNaresh Kumar Inna 		dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
1102*a3667aaeSNaresh Kumar Inna 		return PCI_ERS_RESULT_DISCONNECT;
1103*a3667aaeSNaresh Kumar Inna 	}
1104*a3667aaeSNaresh Kumar Inna 
1105*a3667aaeSNaresh Kumar Inna 	pci_set_master(pdev);
1106*a3667aaeSNaresh Kumar Inna 	pci_restore_state(pdev);
1107*a3667aaeSNaresh Kumar Inna 	pci_save_state(pdev);
1108*a3667aaeSNaresh Kumar Inna 	pci_cleanup_aer_uncorrect_error_status(pdev);
1109*a3667aaeSNaresh Kumar Inna 
1110*a3667aaeSNaresh Kumar Inna 	/* Bring HW s/m to ready state.
1111*a3667aaeSNaresh Kumar Inna 	 * but don't resume IOs.
1112*a3667aaeSNaresh Kumar Inna 	 */
1113*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
1114*a3667aaeSNaresh Kumar Inna 	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
1115*a3667aaeSNaresh Kumar Inna 	ready = csio_is_hw_ready(hw);
1116*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
1117*a3667aaeSNaresh Kumar Inna 
1118*a3667aaeSNaresh Kumar Inna 	if (ready) {
1119*a3667aaeSNaresh Kumar Inna 		return PCI_ERS_RESULT_RECOVERED;
1120*a3667aaeSNaresh Kumar Inna 	} else {
1121*a3667aaeSNaresh Kumar Inna 		dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
1122*a3667aaeSNaresh Kumar Inna 		return PCI_ERS_RESULT_DISCONNECT;
1123*a3667aaeSNaresh Kumar Inna 	}
1124*a3667aaeSNaresh Kumar Inna }
1125*a3667aaeSNaresh Kumar Inna 
1126*a3667aaeSNaresh Kumar Inna /*
1127*a3667aaeSNaresh Kumar Inna  * csio_pci_resume - Resume normal operations
1128*a3667aaeSNaresh Kumar Inna  * @pdev: PCI device
1129*a3667aaeSNaresh Kumar Inna  *
1130*a3667aaeSNaresh Kumar Inna  */
1131*a3667aaeSNaresh Kumar Inna static void
1132*a3667aaeSNaresh Kumar Inna csio_pci_resume(struct pci_dev *pdev)
1133*a3667aaeSNaresh Kumar Inna {
1134*a3667aaeSNaresh Kumar Inna 	struct csio_hw *hw = pci_get_drvdata(pdev);
1135*a3667aaeSNaresh Kumar Inna 	struct csio_lnode *ln;
1136*a3667aaeSNaresh Kumar Inna 	int rv = 0;
1137*a3667aaeSNaresh Kumar Inna 	int i;
1138*a3667aaeSNaresh Kumar Inna 
1139*a3667aaeSNaresh Kumar Inna 	/* Bring the LINK UP and Resume IO */
1140*a3667aaeSNaresh Kumar Inna 
1141*a3667aaeSNaresh Kumar Inna 	for (i = 0; i < hw->num_pports; i++) {
1142*a3667aaeSNaresh Kumar Inna 		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
1143*a3667aaeSNaresh Kumar Inna 		if (!ln) {
1144*a3667aaeSNaresh Kumar Inna 			rv = -ENODEV;
1145*a3667aaeSNaresh Kumar Inna 			break;
1146*a3667aaeSNaresh Kumar Inna 		}
1147*a3667aaeSNaresh Kumar Inna 		/* Initialize portid */
1148*a3667aaeSNaresh Kumar Inna 		ln->portid = hw->pport[i].portid;
1149*a3667aaeSNaresh Kumar Inna 
1150*a3667aaeSNaresh Kumar Inna 		spin_lock_irq(&hw->lock);
1151*a3667aaeSNaresh Kumar Inna 		if (csio_lnode_start(ln) != 0)
1152*a3667aaeSNaresh Kumar Inna 			rv = -ENODEV;
1153*a3667aaeSNaresh Kumar Inna 		spin_unlock_irq(&hw->lock);
1154*a3667aaeSNaresh Kumar Inna 
1155*a3667aaeSNaresh Kumar Inna 		if (rv)
1156*a3667aaeSNaresh Kumar Inna 			break;
1157*a3667aaeSNaresh Kumar Inna 
1158*a3667aaeSNaresh Kumar Inna 		csio_lnode_init_post(ln);
1159*a3667aaeSNaresh Kumar Inna 	}
1160*a3667aaeSNaresh Kumar Inna 
1161*a3667aaeSNaresh Kumar Inna 	if (rv)
1162*a3667aaeSNaresh Kumar Inna 		goto err_resume_exit;
1163*a3667aaeSNaresh Kumar Inna 
1164*a3667aaeSNaresh Kumar Inna 	return;
1165*a3667aaeSNaresh Kumar Inna 
1166*a3667aaeSNaresh Kumar Inna err_resume_exit:
1167*a3667aaeSNaresh Kumar Inna 	csio_lnodes_block_request(hw);
1168*a3667aaeSNaresh Kumar Inna 	spin_lock_irq(&hw->lock);
1169*a3667aaeSNaresh Kumar Inna 	csio_hw_stop(hw);
1170*a3667aaeSNaresh Kumar Inna 	spin_unlock_irq(&hw->lock);
1171*a3667aaeSNaresh Kumar Inna 	csio_lnodes_unblock_request(hw);
1172*a3667aaeSNaresh Kumar Inna 	csio_lnodes_exit(hw, 0);
1173*a3667aaeSNaresh Kumar Inna 	csio_hw_free(hw);
1174*a3667aaeSNaresh Kumar Inna 	dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
1175*a3667aaeSNaresh Kumar Inna }
1176*a3667aaeSNaresh Kumar Inna 
1177*a3667aaeSNaresh Kumar Inna static struct pci_error_handlers csio_err_handler = {
1178*a3667aaeSNaresh Kumar Inna 	.error_detected = csio_pci_error_detected,
1179*a3667aaeSNaresh Kumar Inna 	.slot_reset	= csio_pci_slot_reset,
1180*a3667aaeSNaresh Kumar Inna 	.resume		= csio_pci_resume,
1181*a3667aaeSNaresh Kumar Inna };
1182*a3667aaeSNaresh Kumar Inna 
1183*a3667aaeSNaresh Kumar Inna static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
1184*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0),	/* T440DBG FCOE */
1185*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0),		/* T420CR FCOE */
1186*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0),		/* T422CR FCOE */
1187*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0),		/* T440CR FCOE */
1188*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0),	/* T420BCH FCOE */
1189*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0),	/* T440BCH FCOE */
1190*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0),		/* T440CH FCOE */
1191*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0),		/* T420SO FCOE */
1192*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0),		/* T420CX FCOE */
1193*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0),		/* T420BT FCOE */
1194*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0),		/* T404BT FCOE */
1195*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0),		/* B420 FCOE */
1196*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0),		/* B404 FCOE */
1197*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0),		/* T480 CR FCOE */
1198*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0),	/* T440 LP-CR FCOE */
1199*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_PE10K, 0),		/* PE10K FCOE */
1200*a3667aaeSNaresh Kumar Inna 	CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0),	/* PE10K FCOE on PF1 */
1201*a3667aaeSNaresh Kumar Inna 	{ 0, 0, 0, 0, 0, 0, 0 }
1202*a3667aaeSNaresh Kumar Inna };
1203*a3667aaeSNaresh Kumar Inna 
1204*a3667aaeSNaresh Kumar Inna 
1205*a3667aaeSNaresh Kumar Inna static struct pci_driver csio_pci_driver = {
1206*a3667aaeSNaresh Kumar Inna 	.name		= KBUILD_MODNAME,
1207*a3667aaeSNaresh Kumar Inna 	.driver		= {
1208*a3667aaeSNaresh Kumar Inna 		.owner	= THIS_MODULE,
1209*a3667aaeSNaresh Kumar Inna 	},
1210*a3667aaeSNaresh Kumar Inna 	.id_table	= csio_pci_tbl,
1211*a3667aaeSNaresh Kumar Inna 	.probe		= csio_probe_one,
1212*a3667aaeSNaresh Kumar Inna 	.remove		= csio_remove_one,
1213*a3667aaeSNaresh Kumar Inna 	.err_handler	= &csio_err_handler,
1214*a3667aaeSNaresh Kumar Inna };
1215*a3667aaeSNaresh Kumar Inna 
1216*a3667aaeSNaresh Kumar Inna /*
1217*a3667aaeSNaresh Kumar Inna  * csio_init - Chelsio storage driver initialization function.
1218*a3667aaeSNaresh Kumar Inna  *
1219*a3667aaeSNaresh Kumar Inna  */
1220*a3667aaeSNaresh Kumar Inna static int __init
1221*a3667aaeSNaresh Kumar Inna csio_init(void)
1222*a3667aaeSNaresh Kumar Inna {
1223*a3667aaeSNaresh Kumar Inna 	int rv = -ENOMEM;
1224*a3667aaeSNaresh Kumar Inna 
1225*a3667aaeSNaresh Kumar Inna 	pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);
1226*a3667aaeSNaresh Kumar Inna 
1227*a3667aaeSNaresh Kumar Inna 	csio_dfs_init();
1228*a3667aaeSNaresh Kumar Inna 
1229*a3667aaeSNaresh Kumar Inna 	csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
1230*a3667aaeSNaresh Kumar Inna 	if (!csio_fcoe_transport)
1231*a3667aaeSNaresh Kumar Inna 		goto err;
1232*a3667aaeSNaresh Kumar Inna 
1233*a3667aaeSNaresh Kumar Inna 	csio_fcoe_transport_vport =
1234*a3667aaeSNaresh Kumar Inna 			fc_attach_transport(&csio_fc_transport_vport_funcs);
1235*a3667aaeSNaresh Kumar Inna 	if (!csio_fcoe_transport_vport)
1236*a3667aaeSNaresh Kumar Inna 		goto err_vport;
1237*a3667aaeSNaresh Kumar Inna 
1238*a3667aaeSNaresh Kumar Inna 	rv = pci_register_driver(&csio_pci_driver);
1239*a3667aaeSNaresh Kumar Inna 	if (rv)
1240*a3667aaeSNaresh Kumar Inna 		goto err_pci;
1241*a3667aaeSNaresh Kumar Inna 
1242*a3667aaeSNaresh Kumar Inna 	return 0;
1243*a3667aaeSNaresh Kumar Inna 
1244*a3667aaeSNaresh Kumar Inna err_pci:
1245*a3667aaeSNaresh Kumar Inna 	fc_release_transport(csio_fcoe_transport_vport);
1246*a3667aaeSNaresh Kumar Inna err_vport:
1247*a3667aaeSNaresh Kumar Inna 	fc_release_transport(csio_fcoe_transport);
1248*a3667aaeSNaresh Kumar Inna err:
1249*a3667aaeSNaresh Kumar Inna 	csio_dfs_exit();
1250*a3667aaeSNaresh Kumar Inna 	return rv;
1251*a3667aaeSNaresh Kumar Inna }
1252*a3667aaeSNaresh Kumar Inna 
1253*a3667aaeSNaresh Kumar Inna /*
1254*a3667aaeSNaresh Kumar Inna  * csio_exit - Chelsio storage driver uninitialization .
1255*a3667aaeSNaresh Kumar Inna  *
1256*a3667aaeSNaresh Kumar Inna  * Function that gets called in the unload path.
1257*a3667aaeSNaresh Kumar Inna  */
1258*a3667aaeSNaresh Kumar Inna static void __exit
1259*a3667aaeSNaresh Kumar Inna csio_exit(void)
1260*a3667aaeSNaresh Kumar Inna {
1261*a3667aaeSNaresh Kumar Inna 	pci_unregister_driver(&csio_pci_driver);
1262*a3667aaeSNaresh Kumar Inna 	csio_dfs_exit();
1263*a3667aaeSNaresh Kumar Inna 	fc_release_transport(csio_fcoe_transport_vport);
1264*a3667aaeSNaresh Kumar Inna 	fc_release_transport(csio_fcoe_transport);
1265*a3667aaeSNaresh Kumar Inna }
1266*a3667aaeSNaresh Kumar Inna 
1267*a3667aaeSNaresh Kumar Inna module_init(csio_init);
1268*a3667aaeSNaresh Kumar Inna module_exit(csio_exit);
1269*a3667aaeSNaresh Kumar Inna MODULE_AUTHOR(CSIO_DRV_AUTHOR);
1270*a3667aaeSNaresh Kumar Inna MODULE_DESCRIPTION(CSIO_DRV_DESC);
1271*a3667aaeSNaresh Kumar Inna MODULE_LICENSE(CSIO_DRV_LICENSE);
1272*a3667aaeSNaresh Kumar Inna MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
1273*a3667aaeSNaresh Kumar Inna MODULE_VERSION(CSIO_DRV_VERSION);
1274*a3667aaeSNaresh Kumar Inna MODULE_FIRMWARE(CSIO_FW_FNAME);
1275