/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"

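/*
 * csio_nondata_isr() - Non-data MSIX interrupt handler.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Handles slow-path (error/status) and mailbox interrupts, and schedules
 * the event worker if a firmware event needs further processing.
 */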
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	int rv;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	spin_lock_irqsave(&hw->lock, flags);
	csio_hw_slow_intr_handler(hw);
	rv = csio_mb_isr_handler(hw);

	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);
	return IRQ_HANDLED;
}

/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the handler for FW events. It is shared between the MSIX
 * and INTx handlers.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
	int rv;
	unsigned long flags;

	rv = csio_fwevtq_handler(hw);

	spin_lock_irqsave(&hw->lock, flags);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */

/*
 * csio_fwevt_isr() - FW events MSIX ISR
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Process WRs on the FW event queue.
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_fwevt_handler(hw);

	return IRQ_HANDLED;
}

/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
 * @hw: HW module.
 * @wr: The completed work request (unused here).
 * @len: Length of the WR (unused here).
 * @flb: Freelist buffer array (unused here).
 * @priv: Private data (unused here).
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			   struct csio_fl_dma_buf *flb, void *priv)
{
	csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */

/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: Queue (list head) on which completed ioreqs are returned, so
 *          that their completion callbacks can be invoked by the caller.
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *cbfn_q)
{
	struct csio_ioreq *ioreq;
	uint8_t *scsiwr;
	uint8_t subop;
	void *cmnd;
	unsigned long flags;

	ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
	if (likely(ioreq)) {
		if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
			subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
					((struct fw_scsi_abrt_cls_wr *)
					    scsiwr)->sub_opcode_to_chk_all_io);

			csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
				    subop ? "Close" : "Abort",
				    ioreq, ioreq->wr_status);

			spin_lock_irqsave(&hw->lock, flags);
			if (subop)
				csio_scsi_closed(ioreq,
						 (struct list_head *)cbfn_q);
			else
				csio_scsi_aborted(ioreq,
						  (struct list_head *)cbfn_q);
			/*
			 * We call scsi_done for I/Os whose aborts the driver
			 * believes have timed out. If there is a race caused
			 * by FW completing the abort at the exact same time
			 * that the driver has detected the abort timeout, the
			 * following check prevents scsi_done from being called
			 * twice for the same command: once from the
			 * eh_abort_handler, and again from
			 * csio_scsi_isr_handler(). This also avoids the need
			 * to check if csio_scsi_cmnd(req) is NULL in the fast
			 * path.
			 */
			cmnd = csio_scsi_cmnd(ioreq);
			if (unlikely(cmnd == NULL))
				list_del_init(&ioreq->sm.sm_list);

			spin_unlock_irqrestore(&hw->lock, flags);

			if (unlikely(cmnd == NULL))
				csio_put_scsi_ioreq_lock(hw,
						csio_hw_to_scsim(hw), ioreq);
		} else {
			spin_lock_irqsave(&hw->lock, flags);
			csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
			spin_unlock_irqrestore(&hw->lock, flags);
		}
	}
}

/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
 * by calling csio_wr_process_iq_idx. If there are completions on the
 * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
 * Once done, add these completions onto the freelist.
 * This routine is shared b/w MSIX and INTx.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
	struct csio_hw *hw = (struct csio_hw *)iq->owner;
	LIST_HEAD(cbfn_q);
	struct list_head *tmp;
	struct csio_scsim *scm;
	struct csio_ioreq *ioreq;
	int isr_completions = 0;

	scm = csio_hw_to_scsim(hw);

	if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
					&cbfn_q) != 0))
		return IRQ_NONE;

	/* Call back the completion routines */
	list_for_each(tmp, &cbfn_q) {
		ioreq = (struct csio_ioreq *)tmp;
		isr_completions++;
		ioreq->io_cbfn(hw, ioreq);
		/* Release ddp buffer if used for this req */
		if (unlikely(ioreq->dcopy))
			csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
						    ioreq->nsge);
	}

	if (isr_completions) {
		/* Return the ioreqs back to ioreq->freelist */
		csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
					      isr_completions);
	}

	return IRQ_HANDLED;
}

/*
 * csio_scsi_isr() - SCSI MSIX handler
 * @irq: Interrupt number.
 * @dev_id: The SCSI ingress queue.
 *
 * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
	struct csio_q *iq = (struct csio_q *) dev_id;
	struct csio_hw *hw;

	if (unlikely(!iq))
		return IRQ_NONE;

	hw = (struct csio_hw *)iq->owner;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_scsi_isr_handler(iq);

	return IRQ_HANDLED;
}

/*
 * csio_scsi_intx_handler() - SCSI INTx handler
 * @hw: HW module.
 * @wr: The completed work request (unused here).
 * @len: Length of the WR (unused here).
 * @flb: Freelist buffer array (unused here).
 * @priv: The SCSI ingress queue.
 *
 * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	struct csio_q *iq = priv;

	csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */

/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Handles slow-path interrupts, the INTx forwarded ingress queue and
 * mailbox completions, and schedules the event worker when a firmware
 * event is pending.
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	struct csio_q *intx_q = NULL;
	int rv;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	/* Disable the interrupt for this PCI function. */
	if (hw->intr_mode == CSIO_IM_INTX)
		csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

	/*
	 * The read in the following function will flush the
	 * above write.
	 */
	if (csio_hw_slow_intr_handler(hw))
		ret = IRQ_HANDLED;

	/* Get the INTx Forward interrupt IQ. */
	intx_q = csio_get_q(hw, hw->intr_iq_idx);

	CSIO_DB_ASSERT(intx_q);

	/* IQ handler is not possible for intx_q, hence pass in NULL */
	if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
		ret = IRQ_HANDLED;

	spin_lock_irqsave(&hw->lock, flags);
	rv = csio_mb_isr_handler(hw);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

	return ret;
}

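/*
 * csio_add_msix_desc - Fill in the MSIX vector descriptions.
 * @hw: HW module.
 *
 * Names the non-data, firmware-event and per-SCSI-queue vectors so that
 * they can be identified (e.g. in /proc/interrupts) once requested.
 */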
static void
csio_add_msix_desc(struct csio_hw *hw)
{
	int i;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	int k = CSIO_EXTRA_VECS;
	int len = sizeof(entryp->desc) - 1;
	int cnt = hw->num_sqsets + k;

	/* Non-data vector */
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

	entryp++;
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
	entryp++;

	/* Name SCSI vecs */
	for (i = k; i < cnt; i++, entryp++) {
		memset(entryp->desc, 0, len + 1);
		snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
			 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
			 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
	}
}

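/*
 * csio_request_irqs - Request the interrupt lines for this adapter.
 * @hw: HW module.
 *
 * In MSIX mode, requests one vector for non-data interrupts, one for FW
 * events and one per SCSI queue set. In MSI/INTx mode, a single line is
 * requested for csio_fcoe_isr(). Returns 0 on success, -EINVAL otherwise.
 */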
int
csio_request_irqs(struct csio_hw *hw)
{
	int rv, i, j, k = 0;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	struct csio_scsi_cpu_info *info;
	struct pci_dev *pdev = hw->pdev;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr,
				hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED,
				KBUILD_MODNAME, hw);
		if (rv) {
			csio_err(hw, "Failed to allocate interrupt line.\n");
			goto out_free_irqs;
		}

		goto out;
	}

	/* Add the MSIX vector descriptions */
	csio_add_msix_desc(hw);

	rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 pci_irq_vector(pdev, k), rv);
		goto out_free_irqs;
	}

	entryp[k++].dev_id = hw;

	rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 pci_irq_vector(pdev, k), rv);
		goto out_free_irqs;
	}

	entryp[k++].dev_id = (void *)hw;

	/* Allocate IRQs for SCSI */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];
		for (j = 0; j < info->max_cpus; j++, k++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];
			struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

			rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0,
					 entryp[k].desc, q);
			if (rv) {
				csio_err(hw,
				       "IRQ request failed for vec %d err:%d\n",
				       pci_irq_vector(pdev, k), rv);
				goto out_free_irqs;
			}

			entryp[k].dev_id = q;

		} /* for all scsi cpus */
	} /* for all ports */

out:
	hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
	return 0;

out_free_irqs:
	for (i = 0; i < k; i++)
		free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id);
	pci_free_irq_vectors(hw->pdev);
	return -EINVAL;
}

/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
	int i;
	struct csio_scsi_cpu_info *info;

	while (cnt < hw->num_sqsets) {
		for (i = 0; i < hw->num_pports; i++) {
			info = &hw->scsi_cpu_info[i];
			if (info->max_cpus > 1) {
				info->max_cpus--;
				hw->num_sqsets--;
				if (hw->num_sqsets <= cnt)
					break;
			}
		}
	}

	csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}

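/*
 * csio_calc_sets - irq_affinity calc_sets callback.
 * @affd: Affinity descriptor; @affd->priv carries the HW module.
 * @nvecs: Number of affinity-managed vectors available.
 *
 * Splits the SCSI vectors into one affinity set per port, or into a
 * single set if fewer vectors than ports are available.
 */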
static void csio_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	struct csio_hw *hw = affd->priv;
	u8 i;

	if (!nvecs)
		return;

	if (nvecs < hw->num_pports) {
		affd->nr_sets = 1;
		affd->set_size[0] = nvecs;
		return;
	}

	affd->nr_sets = hw->num_pports;
	for (i = 0; i < hw->num_pports; i++)
		affd->set_size[i] = nvecs / hw->num_pports;
}

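/*
 * csio_enable_msix - Allocate MSIX vectors with affinity hints.
 * @hw: HW module.
 *
 * Allocates between (num_pports + CSIO_EXTRA_VECS) and
 * (num_sqsets + CSIO_EXTRA_VECS) vectors, capped by the firmware-configured
 * ingress queues where applicable, reduces the SCSI queue sets if fewer
 * vectors are granted, and records the interrupt index used by the
 * non-data, FW event and each SCSI queue set. Returns 0 on success.
 */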
static int
csio_enable_msix(struct csio_hw *hw)
{
	int i, j, k, n, min, cnt;
	int extra = CSIO_EXTRA_VECS;
	struct csio_scsi_cpu_info *info;
	struct irq_affinity desc = {
		.pre_vectors = CSIO_EXTRA_VECS,
		.calc_sets = csio_calc_sets,
		.priv = hw,
	};

	if (hw->num_pports > IRQ_AFFINITY_MAX_SETS)
		return -ENOSPC;

	min = hw->num_pports + extra;
	cnt = hw->num_sqsets + extra;

	/* Max vectors required based on #niqs configured in fw */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
		cnt = min_t(uint8_t, hw->cfg_niq, cnt);

	csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

	cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
	if (cnt < 0)
		return cnt;

	if (cnt < (hw->num_sqsets + extra)) {
		csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
		csio_reduce_sqsets(hw, cnt - extra);
	}

	/* Distribute vectors */
	k = 0;
	csio_set_nondata_intr_idx(hw, k);
	csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++);
	csio_set_fwevt_intr_idx(hw, k++);

	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			n = (j % info->max_cpus) + k;
			hw->sqset[i][j].intr_idx = n;
		}

		k += info->max_cpus;
	}

	return 0;
}

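/*
 * csio_intr_enable - Select and enable the interrupt mode.
 * @hw: HW module.
 *
 * Tries MSIX first (when csio_msi == 2), then MSI (csio_msi == 1) and
 * finally falls back to INTx, trimming the SCSI queue sets to what the
 * firmware-configured ingress queues allow in the non-MSIX case.
 */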
void
csio_intr_enable(struct csio_hw *hw)
{
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

	/* Try MSIX, then MSI or fall back to INTx */
	if ((csio_msi == 2) && !csio_enable_msix(hw))
		hw->intr_mode = CSIO_IM_MSIX;
	else {
		/* Max iqs required based on #niqs configured in fw */
		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
			!csio_is_hw_master(hw)) {
			int extra = CSIO_EXTRA_MSI_IQS;

			if (hw->cfg_niq < (hw->num_sqsets + extra)) {
				csio_dbg(hw, "Reducing sqsets to %d\n",
					 hw->cfg_niq - extra);
				csio_reduce_sqsets(hw, hw->cfg_niq - extra);
			}
		}

		if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
			hw->intr_mode = CSIO_IM_MSI;
		else
			hw->intr_mode = CSIO_IM_INTX;
	}

	csio_dbg(hw, "Using %s interrupt mode.\n",
		(hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
		((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}

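/*
 * csio_intr_disable - Disable interrupts and optionally free the IRQs.
 * @hw: HW module.
 * @free: If true, free the requested IRQ lines before releasing the
 *        PCI IRQ vectors.
 */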
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
	csio_hw_intr_disable(hw);

	if (free) {
		int i;

		switch (hw->intr_mode) {
		case CSIO_IM_MSIX:
			for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) {
				free_irq(pci_irq_vector(hw->pdev, i),
					 hw->msix_entries[i].dev_id);
			}
			break;
		case CSIO_IM_MSI:
		case CSIO_IM_INTX:
			free_irq(pci_irq_vector(hw->pdev, 0), hw);
			break;
		default:
			break;
		}
	}

	pci_free_irq_vectors(hw->pdev);
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}