/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static struct workqueue_struct *reg_workq;

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

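/*
 * Record one work-request timing sample in the circular wr_log: the SGE
 * and host timestamps taken when the WR was posted and when its CQE was
 * polled.  No-op unless the wr_log was allocated at rdev open time.
 */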
void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	le.poll_host_time = ktime_get();
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}

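/*
 * debugfs "wr_log" show handler: walk the circular log from oldest to
 * newest valid entry and print per-WR host and SGE timestamp deltas,
 * converting SGE clock ticks to nanoseconds via the core clock period.
 */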
static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	ktime_t prev_time;
	struct wr_log_entry *lep;
	int prev_time_set = 0;
	int idx, end;

#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_time_set) {
				prev_time_set = 1;
				prev_time = lep->poll_host_time;
			}
			seq_printf(seq, "%04u: nsec %llu qid %u opcode "
				   "%u %s 0x%x host_wr_delta nsec %llu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   ktime_to_ns(ktime_sub(lep->poll_host_time,
							 prev_time)),
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
							"msn" : "wrid",
				   lep->wr_id,
				   ktime_to_ns(ktime_sub(lep->poll_host_time,
							 lep->post_host_time)),
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_time = lep->poll_host_time;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}

static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}

static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}

static const struct file_operations wr_log_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = wr_log_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = wr_log_clear,
};

static struct sockaddr_in zero_sin = {
	.sin_family = AF_INET,
};

static struct sockaddr_in6 zero_sin6 = {
	.sin6_family = AF_INET6,
};

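/*
 * Gather the cm_id and endpoint (mapped) IPv4 sockaddr pointers for the
 * debugfs dumps; set_ep_sin6_addrs() below does the same for IPv6.  If
 * no cm_id is attached yet, the cm_id addresses fall back to a zero
 * address of the right family.
 */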
static void set_ep_sin_addrs(struct c4iw_ep *ep,
			     struct sockaddr_in **lsin,
			     struct sockaddr_in **rsin,
			     struct sockaddr_in **m_lsin,
			     struct sockaddr_in **m_rsin)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*m_lsin = (struct sockaddr_in *)&ep->com.local_addr;
	*m_rsin = (struct sockaddr_in *)&ep->com.remote_addr;
	if (id) {
		*lsin = (struct sockaddr_in *)&id->local_addr;
		*rsin = (struct sockaddr_in *)&id->remote_addr;
	} else {
		*lsin = &zero_sin;
		*rsin = &zero_sin;
	}
}

static void set_ep_sin6_addrs(struct c4iw_ep *ep,
			      struct sockaddr_in6 **lsin6,
			      struct sockaddr_in6 **rsin6,
			      struct sockaddr_in6 **m_lsin6,
			      struct sockaddr_in6 **m_rsin6)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*m_lsin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	*m_rsin6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
	if (id) {
		*lsin6 = (struct sockaddr_in6 *)&id->local_addr;
		*rsin6 = (struct sockaddr_in6 *)&id->remote_addr;
	} else {
		*lsin6 = &zero_sin6;
		*rsin6 = &zero_sin6;
	}
}

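/*
 * Format one QP into the "qps" debugfs buffer.  Only the xarray entry
 * indexed by the SQ qid is printed so each QP appears once; returns 1
 * when the buffer is full so the caller stops iterating.
 */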
static int dump_qp(unsigned long id, struct c4iw_qp *qp,
		   struct c4iw_debugfs_data *qpd)
{
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		struct c4iw_ep *ep = qp->ep;

		if (ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin;
			struct sockaddr_in *rsin;
			struct sockaddr_in *m_lsin;
			struct sockaddr_in *m_rsin;

			set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u %s id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->srq ? "srq" : "rq",
				      qp->srq ? qp->srq->idx : qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(m_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(m_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6;
			struct sockaddr_in6 *rsin6;
			struct sockaddr_in6 *m_lsin6;
			struct sockaddr_in6 *m_rsin6;

			set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6,
					  &m_rsin6);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(m_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(m_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			     "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_qp *qp;
	struct c4iw_debugfs_data *qpd;
	unsigned long index;
	int count = 1;

	qpd = kmalloc(sizeof(*qpd), GFP_KERNEL);
	if (!qpd)
		return -ENOMEM;

	qpd->devp = inode->i_private;
	qpd->pos = 0;

	/*
	 * No need to lock; we drop the lock to call vmalloc so it's racy
	 * anyway.  Someone who cares should switch this over to seq_file
	 */
	xa_for_each(&qpd->devp->qps, index, qp)
		count++;

	qpd->bufsize = count * 180;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		kfree(qpd);
		return -ENOMEM;
	}

	xa_lock_irq(&qpd->devp->qps);
	xa_for_each(&qpd->devp->qps, index, qp)
		dump_qp(index, qp, qpd);
	xa_unlock_irq(&qpd->devp->qps);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	return 0;
}

static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

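/*
 * Read the hardware TPT entry for one stag via cxgb4_read_tpte() and
 * format its decoded fields into the "stags" debugfs buffer.
 */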
static int dump_stag(unsigned long id, struct c4iw_debugfs_data *stagd)
{
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id<<8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		pr_info("%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	void *p;
	unsigned long index;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof(*stagd), GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	xa_for_each(&stagd->devp->mrs, index, p)
		count++;

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	xa_lock_irq(&stagd->devp->mrs);
	xa_for_each(&stagd->devp->mrs, index, p)
		dump_stag(index, stagd);
	xa_unlock_irq(&stagd->devp->mrs);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

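/*
 * debugfs "stats" show handler: print per-resource usage counters
 * (total/current/max/fail) plus doorbell and offload-connection
 * statistics for this device.
 */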
static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "     SRQS: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.srqt.total, dev->rdev.stats.srqt.cur,
			dev->rdev.stats.srqt.max, dev->rdev.stats.srqt.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
		size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.srqt.max = 0;
	dev->rdev.stats.srqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

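/*
 * Format one connected endpoint into the "eps" debugfs buffer;
 * dump_listen_ep() below does the same for listening endpoints.
 */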
static int dump_ep(struct c4iw_ep *ep, struct c4iw_debugfs_data *epd)
{
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin;
		struct sockaddr_in *rsin;
		struct sockaddr_in *m_lsin;
		struct sockaddr_in *m_rsin;

		set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(m_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6;
		struct sockaddr_in6 *rsin6;
		struct sockaddr_in6 *m_lsin6;
		struct sockaddr_in6 *m_rsin6;

		set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(m_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static
int dump_listen_ep(struct c4iw_listen_ep *ep, struct c4iw_debugfs_data *epd)
{
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in *m_lsin = (struct sockaddr_in *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in6 *m_lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_ep *ep;
	struct c4iw_listen_ep *lep;
	unsigned long index;
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	xa_for_each(&epd->devp->hwtids, index, ep)
		count++;
	xa_for_each(&epd->devp->atids, index, ep)
		count++;
	xa_for_each(&epd->devp->stids, index, lep)
		count++;

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	xa_lock_irq(&epd->devp->hwtids);
	xa_for_each(&epd->devp->hwtids, index, ep)
		dump_ep(ep, epd);
	xa_unlock_irq(&epd->devp->hwtids);
	xa_lock_irq(&epd->devp->atids);
	xa_for_each(&epd->devp->atids, index, ep)
		dump_ep(ep, epd);
	xa_unlock_irq(&epd->devp->atids);
	xa_lock_irq(&epd->devp->stids);
	xa_for_each(&epd->devp->stids, index, lep)
		dump_listen_ep(lep, epd);
	xa_unlock_irq(&epd->devp->stids);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};

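/*
 * Create the per-device debugfs files ("qps", "stags", "stats", "eps",
 * and optionally "wr_log") under this device's debugfs directory.
 */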
static void setup_debugfs(struct c4iw_dev *devp)
{
	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
					 (void *)devp, &wr_log_debugfs_fops, 4096);
}

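/*
 * Free the qpid/cqid free-lists of a user context.  Kernel-mode qid
 * blocks (those whose base qid is aligned to the qpmask) are returned
 * to the global qid table and the usage stats adjusted.
 */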
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;
	unsigned int factor;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density!  Eventually
	 * we might need to support this but for now fail the open. Also the
	 * cqid and qpid range must match for now.
	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
		pr_err("%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
		return -EINVAL;
	}
	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
		pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
		       rdev->lldi.vr->cq.size);
		return -EINVAL;
	}

	/* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
	if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
		pr_err("%s: unsupported sge host page size %u\n",
		       pci_name(rdev->lldi.pdev),
		       rdev->lldi.sge_host_page_size);
		return -EINVAL;
	}

	factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
	rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
	rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;

	pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
		 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
		 rdev->lldi.vr->pbl.start,
		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
		 rdev->lldi.vr->rq.size,
		 rdev->lldi.vr->qp.start,
		 rdev->lldi.vr->qp.size,
		 rdev->lldi.vr->cq.start,
		 rdev->lldi.vr->cq.size,
		 rdev->lldi.vr->srq.size);
	pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
		 &rdev->lldi.pdev->resource[2],
		 rdev->lldi.db_reg, rdev->lldi.gts_reg,
		 rdev->qpmask, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0)
		return -EINVAL;

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.srqt.total = rdev->lldi.vr->srq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev),
				 T4_MAX_NUM_PD, rdev->lldi.vr->srq.size);
	if (err) {
		pr_err("error %d initializing resources\n", err);
		return err;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		pr_err("error %d initializing pbl pool\n", err);
		goto destroy_resource;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		pr_err("error %d initializing rqt pool\n", err);
		goto destroy_pblpool;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		pr_err("error %d initializing ocqp pool\n", err);
		goto destroy_rqtpool;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		err = -ENOMEM;
		goto destroy_ocqp_pool;
	}
	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
	rdev->status_page->cq_size = rdev->lldi.vr->cq.size;
	rdev->status_page->write_cmpl_supported = rdev->lldi.write_cmpl_support;

	if (c4iw_wr_log) {
		rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order,
				       sizeof(*rdev->wr_log),
				       GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		}
	}

	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		err = -ENOMEM;
		goto err_free_status_page_and_wr_log;
	}

	rdev->status_page->db_off = 0;

	init_completion(&rdev->rqt_compl);
	init_completion(&rdev->pbl_compl);
	kref_init(&rdev->rqt_kref);
	kref_init(&rdev->pbl_kref);

	return 0;
err_free_status_page_and_wr_log:
	if (c4iw_wr_log && rdev->wr_log)
		kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
	c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
	c4iw_rqtpool_destroy(rdev);
destroy_pblpool:
	c4iw_pblpool_destroy(rdev);
destroy_resource:
	c4iw_destroy_resource(&rdev->resource);
	return err;
}

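/*
 * Undo c4iw_rdev_open(): free the wr_log and status page, destroy the
 * memory pools (waiting for outstanding PBL/RQT users to finish), and
 * release the workqueue and resource tables.
 */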
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	kfree(rdev->wr_log);
	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	wait_for_completion(&rdev->pbl_compl);
	wait_for_completion(&rdev->rqt_compl);
	c4iw_ocqp_pool_destroy(rdev);
	destroy_workqueue(rdev->free_workq);
	c4iw_destroy_resource(&rdev->resource);
}

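/*
 * Release the ULD context's device: close the rdev, check that the
 * resource xarrays are empty (waiting for in-flight hwtids to drain),
 * unmap BAR2/on-chip memory, and free the ib_device.
 */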
void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	WARN_ON(!xa_empty(&ctx->dev->cqs));
	WARN_ON(!xa_empty(&ctx->dev->qps));
	WARN_ON(!xa_empty(&ctx->dev->mrs));
	wait_event(ctx->dev->wait, xa_empty(&ctx->dev->hwtids));
	WARN_ON(!xa_empty(&ctx->dev->stids));
	WARN_ON(!xa_empty(&ctx->dev->atids));
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	pr_debug("c4iw_dev %p\n", ctx->dev);
	debugfs_remove_recursive(ctx->dev->debugfs_root);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

c4iw_alloc(const struct cxgb4_lld_info * infop)968cfdda9d7SSteve Wise static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
969cfdda9d7SSteve Wise {
970cfdda9d7SSteve Wise 	struct c4iw_dev *devp;
971cfdda9d7SSteve Wise 	int ret;
972cfdda9d7SSteve Wise 
9739efe10a1SSteve Wise 	if (!rdma_supported(infop)) {
974700456bdSJoe Perches 		pr_info("%s: RDMA not supported on this device\n",
9759efe10a1SSteve Wise 			pci_name(infop->pdev));
9769efe10a1SSteve Wise 		return ERR_PTR(-ENOSYS);
9779efe10a1SSteve Wise 	}
978f079af7aSVipul Pandya 	if (!ocqp_supported(infop))
979700456bdSJoe Perches 		pr_info("%s: On-Chip Queues not supported on this device\n",
980f079af7aSVipul Pandya 			pci_name(infop->pdev));
98180ccdd60SVipul Pandya 
982459cc69fSLeon Romanovsky 	devp = ib_alloc_device(c4iw_dev, ibdev);
983cfdda9d7SSteve Wise 	if (!devp) {
984700456bdSJoe Perches 		pr_err("Cannot allocate ib device\n");
985bbe9a0a2SSteve Wise 		return ERR_PTR(-ENOMEM);
986cfdda9d7SSteve Wise 	}
987cfdda9d7SSteve Wise 	devp->rdev.lldi = *infop;
988cfdda9d7SSteve Wise 
98904e10e21SHariprasad Shenai 	/* init various hw-queue params based on lld info */
990548ddb19SBharat Potnuri 	pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
991548ddb19SBharat Potnuri 		 devp->rdev.lldi.sge_ingpadboundary,
99204e10e21SHariprasad Shenai 		 devp->rdev.lldi.sge_egrstatuspagesize);
99304e10e21SHariprasad Shenai 
99404e10e21SHariprasad Shenai 	devp->rdev.hw_queue.t4_eq_status_entries =
9954bbfabedSGanesh Goudar 		devp->rdev.lldi.sge_egrstatuspagesize / 64;
99666eb19afSHariprasad Shenai 	devp->rdev.hw_queue.t4_max_eq_size = 65520;
99766eb19afSHariprasad Shenai 	devp->rdev.hw_queue.t4_max_iq_size = 65520;
99866eb19afSHariprasad Shenai 	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
99966eb19afSHariprasad Shenai 		devp->rdev.hw_queue.t4_eq_status_entries - 1;
100004e10e21SHariprasad Shenai 	devp->rdev.hw_queue.t4_max_sq_size =
100166eb19afSHariprasad Shenai 		devp->rdev.hw_queue.t4_max_eq_size -
100266eb19afSHariprasad Shenai 		devp->rdev.hw_queue.t4_eq_status_entries - 1;
100304e10e21SHariprasad Shenai 	devp->rdev.hw_queue.t4_max_qp_depth =
100466eb19afSHariprasad Shenai 		devp->rdev.hw_queue.t4_max_rq_size;
100504e10e21SHariprasad Shenai 	devp->rdev.hw_queue.t4_max_cq_depth =
100666eb19afSHariprasad Shenai 		devp->rdev.hw_queue.t4_max_iq_size - 2;
100704e10e21SHariprasad Shenai 	devp->rdev.hw_queue.t4_stat_len =
100804e10e21SHariprasad Shenai 		devp->rdev.lldi.sge_egrstatuspagesize;
100904e10e21SHariprasad Shenai 
1010fa658a98SSteve Wise 	/*
1011963cab50SHariprasad S 	 * For T5/T6 devices, we map all of BAR2 with WC.
1012fa658a98SSteve Wise 	 * For T4 devices with onchip qp mem, we map only that part
1013fa658a98SSteve Wise 	 * of BAR2 with WC.
1014fa658a98SSteve Wise 	 */
1015fa658a98SSteve Wise 	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
1016963cab50SHariprasad S 	if (!is_t4(devp->rdev.lldi.adapter_type)) {
1017fa658a98SSteve Wise 		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
1018fa658a98SSteve Wise 			pci_resource_len(devp->rdev.lldi.pdev, 2));
1019fa658a98SSteve Wise 		if (!devp->rdev.bar2_kva) {
1020700456bdSJoe Perches 			pr_err("Unable to ioremap BAR2\n");
102165b302adSChristoph Jaeger 			ib_dealloc_device(&devp->ibdev);
1022fa658a98SSteve Wise 			return ERR_PTR(-EINVAL);
1023fa658a98SSteve Wise 		}
1024fa658a98SSteve Wise 	} else if (ocqp_supported(infop)) {
1025fa658a98SSteve Wise 		devp->rdev.oc_mw_pa =
1026fa658a98SSteve Wise 			pci_resource_start(devp->rdev.lldi.pdev, 2) +
1027fa658a98SSteve Wise 			pci_resource_len(devp->rdev.lldi.pdev, 2) -
1028fa658a98SSteve Wise 			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
1029c6d7b267SSteve Wise 		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
1030c6d7b267SSteve Wise 			devp->rdev.lldi.vr->ocq.size);
1031fa658a98SSteve Wise 		if (!devp->rdev.oc_mw_kva) {
1032700456bdSJoe Perches 			pr_err("Unable to ioremap onchip mem\n");
103365b302adSChristoph Jaeger 			ib_dealloc_device(&devp->ibdev);
1034fa658a98SSteve Wise 			return ERR_PTR(-EINVAL);
1035fa658a98SSteve Wise 		}
1036fa658a98SSteve Wise 	}
1037c6d7b267SSteve Wise 
1038a9a42886SJoe Perches 	pr_debug("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
1039c6d7b267SSteve Wise 		 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
1040c6d7b267SSteve Wise 		 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
1041c6d7b267SSteve Wise 
1042cfdda9d7SSteve Wise 	ret = c4iw_rdev_open(&devp->rdev);
1043cfdda9d7SSteve Wise 	if (ret) {
1044700456bdSJoe Perches 		pr_err("Unable to open CXIO rdev err %d\n", ret);
1045cfdda9d7SSteve Wise 		ib_dealloc_device(&devp->ibdev);
1046bbe9a0a2SSteve Wise 		return ERR_PTR(ret);
1047cfdda9d7SSteve Wise 	}
1048cfdda9d7SSteve Wise 
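	/*
	 * ID -> object lookup tables for CQs, QPs, MRs and the TID spaces.
	 * They use IRQ-safe locking since they are also consulted from the
	 * ingress/CPL handling path.
	 */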
104952e124c2SMatthew Wilcox 	xa_init_flags(&devp->cqs, XA_FLAGS_LOCK_IRQ);
10502f431291SMatthew Wilcox 	xa_init_flags(&devp->qps, XA_FLAGS_LOCK_IRQ);
10517a268a93SMatthew Wilcox 	xa_init_flags(&devp->mrs, XA_FLAGS_LOCK_IRQ);
1052f254ba6aSMatthew Wilcox 	xa_init_flags(&devp->hwtids, XA_FLAGS_LOCK_IRQ);
10539f5a9632SMatthew Wilcox 	xa_init_flags(&devp->atids, XA_FLAGS_LOCK_IRQ);
1054401b4480SMatthew Wilcox 	xa_init_flags(&devp->stids, XA_FLAGS_LOCK_IRQ);
10558d81ef34SVipul Pandya 	mutex_init(&devp->rdev.stats.lock);
10562c974781SVipul Pandya 	mutex_init(&devp->db_mutex);
105705eb2389SSteve Wise 	INIT_LIST_HEAD(&devp->db_fc_list);
105837eb816cSSteve Wise 	init_waitqueue_head(&devp->wait);
10594c2c5763SHariprasad Shenai 	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
1060cfdda9d7SSteve Wise 
1061cfdda9d7SSteve Wise 	if (c4iw_debugfs_root) {
1062cfdda9d7SSteve Wise 		devp->debugfs_root = debugfs_create_dir(
1063cfdda9d7SSteve Wise 					pci_name(devp->rdev.lldi.pdev),
1064cfdda9d7SSteve Wise 					c4iw_debugfs_root);
1065cfdda9d7SSteve Wise 		setup_debugfs(devp);
1066cfdda9d7SSteve Wise 	}
10679eccfe10SSteve Wise 
10689eccfe10SSteve Wise 
1069cfdda9d7SSteve Wise 	return devp;
1070cfdda9d7SSteve Wise }
1071cfdda9d7SSteve Wise 
1072cfdda9d7SSteve Wise static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
1073cfdda9d7SSteve Wise {
10742f25e9a5SSteve Wise 	struct uld_ctx *ctx;
1075cfdda9d7SSteve Wise 	static int vers_printed;
1076cfdda9d7SSteve Wise 	int i;
1077cfdda9d7SSteve Wise 
1078cfdda9d7SSteve Wise 	if (!vers_printed++)
1079f079af7aSVipul Pandya 		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
1080cfdda9d7SSteve Wise 			DRV_VERSION);
1081cfdda9d7SSteve Wise 
108234d56893SLeon Romanovsky 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
10832f25e9a5SSteve Wise 	if (!ctx) {
10842f25e9a5SSteve Wise 		ctx = ERR_PTR(-ENOMEM);
1085cfdda9d7SSteve Wise 		goto out;
10862f25e9a5SSteve Wise 	}
10872f25e9a5SSteve Wise 	ctx->lldi = *infop;
1088cfdda9d7SSteve Wise 
1089548ddb19SBharat Potnuri 	pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
1090548ddb19SBharat Potnuri 		 pci_name(ctx->lldi.pdev),
10912f25e9a5SSteve Wise 		 ctx->lldi.nchan, ctx->lldi.nrxq,
10922f25e9a5SSteve Wise 		 ctx->lldi.ntxq, ctx->lldi.nports);
1093cfdda9d7SSteve Wise 
10942f25e9a5SSteve Wise 	mutex_lock(&dev_mutex);
10952f25e9a5SSteve Wise 	list_add_tail(&ctx->entry, &uld_ctx_list);
10962f25e9a5SSteve Wise 	mutex_unlock(&dev_mutex);
10972f25e9a5SSteve Wise 
10982f25e9a5SSteve Wise 	for (i = 0; i < ctx->lldi.nrxq; i++)
1099a9a42886SJoe Perches 		pr_debug("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
1100cfdda9d7SSteve Wise out:
11012f25e9a5SSteve Wise 	return ctx;
1102cfdda9d7SSteve Wise }
1103cfdda9d7SSteve Wise 
11041cab775cSVipul Pandya static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
11051cab775cSVipul Pandya 						 const __be64 *rsp,
11061cab775cSVipul Pandya 						 u32 pktshift)
11071cab775cSVipul Pandya {
11081cab775cSVipul Pandya 	struct sk_buff *skb;
11091cab775cSVipul Pandya 
11101cab775cSVipul Pandya 	/*
11111cab775cSVipul Pandya 	 * Allocate space for cpl_pass_accept_req which will be synthesized by
11121cab775cSVipul Pandya 	 * the driver. Once the driver synthesizes the request, the skb will go
11131cab775cSVipul Pandya 	 * through the regular cpl_pass_accept_req processing.
11141cab775cSVipul Pandya 	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
11151cab775cSVipul Pandya 	 * cpl_rx_pkt.
11161cab775cSVipul Pandya 	 */
11171cab775cSVipul Pandya 	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
11181cab775cSVipul Pandya 			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
11191cab775cSVipul Pandya 	if (unlikely(!skb))
11201cab775cSVipul Pandya 		return NULL;
11211cab775cSVipul Pandya 
11221cab775cSVipul Pandya 	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
11231cab775cSVipul Pandya 		  sizeof(struct rss_header) - pktshift);
11241cab775cSVipul Pandya 
11251cab775cSVipul Pandya 	/*
11261cab775cSVipul Pandya 	 * This skb will contain:
11271cab775cSVipul Pandya 	 *   rss_header from the rspq descriptor (1 flit)
11281cab775cSVipul Pandya 	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
11291cab775cSVipul Pandya 	 *   space for the difference between the size of an
11301cab775cSVipul Pandya 	 *      rx_pkt and pass_accept_req cpl (1 flit)
11311cab775cSVipul Pandya 	 *   the packet data from the gl
11321cab775cSVipul Pandya 	 */
11331cab775cSVipul Pandya 	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
11341cab775cSVipul Pandya 				sizeof(struct rss_header));
11351cab775cSVipul Pandya 	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
11361cab775cSVipul Pandya 				       sizeof(struct cpl_pass_accept_req),
11371cab775cSVipul Pandya 				       gl->va + pktshift,
11381cab775cSVipul Pandya 				       gl->tot_len - pktshift);
11391cab775cSVipul Pandya 	return skb;
11401cab775cSVipul Pandya }
11411cab775cSVipul Pandya 
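/*
 * Handle an ingress CPL_RX_PKT: a frame the hardware did not steer to an
 * offloaded connection (typically a connection-setup packet).  The gather
 * list is copied into an skb with headroom for the cpl_pass_accept_req that
 * the CM code will synthesize, then dispatched through c4iw_handlers[].
 * Returns 1 if the packet was consumed, 0 to fall back to normal processing.
 */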
11421cab775cSVipul Pandya static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
11431cab775cSVipul Pandya 			   const __be64 *rsp)
11441cab775cSVipul Pandya {
11451cab775cSVipul Pandya 	unsigned int opcode = *(u8 *)rsp;
11461cab775cSVipul Pandya 	struct sk_buff *skb;
11471cab775cSVipul Pandya 
11481cab775cSVipul Pandya 	if (opcode != CPL_RX_PKT)
11491cab775cSVipul Pandya 		goto out;
11501cab775cSVipul Pandya 
11511cab775cSVipul Pandya 	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
11521cab775cSVipul Pandya 	if (skb == NULL)
11531cab775cSVipul Pandya 		goto out;
11541cab775cSVipul Pandya 
11551cab775cSVipul Pandya 	if (c4iw_handlers[opcode] == NULL) {
1156700456bdSJoe Perches 		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
11571cab775cSVipul Pandya 		kfree_skb(skb);
11581cab775cSVipul Pandya 		goto out;
11591cab775cSVipul Pandya 	}
11601cab775cSVipul Pandya 	c4iw_handlers[opcode](dev, skb);
11611cab775cSVipul Pandya 	return 1;
11621cab775cSVipul Pandya out:
11631cab775cSVipul Pandya 	return 0;
11641cab775cSVipul Pandya }
11651cab775cSVipul Pandya 
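/*
 * ULD rx entry point.  Three ingress flavours are handled here: a response
 * with no gather list (the payload is copied straight out of the descriptor),
 * an async notification (CXGB4_MSG_AN) which only kicks the CQ event handler,
 * and a normal gather list which is turned into an skb and dispatched through
 * c4iw_handlers[] keyed by CPL opcode.
 */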
1166cfdda9d7SSteve Wise static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
1167cfdda9d7SSteve Wise 			const struct pkt_gl *gl)
1168cfdda9d7SSteve Wise {
11692f25e9a5SSteve Wise 	struct uld_ctx *ctx = handle;
11702f25e9a5SSteve Wise 	struct c4iw_dev *dev = ctx->dev;
1171cfdda9d7SSteve Wise 	struct sk_buff *skb;
11721cab775cSVipul Pandya 	u8 opcode;
1173cfdda9d7SSteve Wise 
1174cfdda9d7SSteve Wise 	if (gl == NULL) {
1175cfdda9d7SSteve Wise 		/* omit RSS and rsp_ctrl at end of descriptor */
1176cfdda9d7SSteve Wise 		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
1177cfdda9d7SSteve Wise 
1178cfdda9d7SSteve Wise 		skb = alloc_skb(256, GFP_ATOMIC);
1179cfdda9d7SSteve Wise 		if (!skb)
1180cfdda9d7SSteve Wise 			goto nomem;
1181cfdda9d7SSteve Wise 		__skb_put(skb, len);
1182cfdda9d7SSteve Wise 		skb_copy_to_linear_data(skb, &rsp[1], len);
1183cfdda9d7SSteve Wise 	} else if (gl == CXGB4_MSG_AN) {
1184cfdda9d7SSteve Wise 		const struct rsp_ctrl *rc = (void *)rsp;
1185cfdda9d7SSteve Wise 
1186cfdda9d7SSteve Wise 		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
1187cfdda9d7SSteve Wise 		c4iw_ev_handler(dev, qid);
1188cfdda9d7SSteve Wise 		return 0;
11891cab775cSVipul Pandya 	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
11901cab775cSVipul Pandya 		if (recv_rx_pkt(dev, gl, rsp))
11911cab775cSVipul Pandya 			return 0;
11921cab775cSVipul Pandya 
1193700456bdSJoe Perches 		pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
11941cab775cSVipul Pandya 			pci_name(ctx->lldi.pdev), gl->va,
1195700456bdSJoe Perches 			be64_to_cpu(*rsp),
1196700456bdSJoe Perches 			be64_to_cpu(*(__force __be64 *)gl->va),
11971cab775cSVipul Pandya 			gl->tot_len);
11981cab775cSVipul Pandya 
11991cab775cSVipul Pandya 		return 0;
1200cfdda9d7SSteve Wise 	} else {
1201da411ba1SSteve Wise 		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
1202cfdda9d7SSteve Wise 		if (unlikely(!skb))
1203cfdda9d7SSteve Wise 			goto nomem;
1204cfdda9d7SSteve Wise 	}
1205cfdda9d7SSteve Wise 
12061cab775cSVipul Pandya 	opcode = *(u8 *)rsp;
1207dbb084ccSSteve Wise 	if (c4iw_handlers[opcode]) {
1208cfdda9d7SSteve Wise 		c4iw_handlers[opcode](dev, skb);
1209dbb084ccSSteve Wise 	} else {
1210700456bdSJoe Perches 		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
1211dbb084ccSSteve Wise 		kfree_skb(skb);
1212dbb084ccSSteve Wise 	}
1213cfdda9d7SSteve Wise 
1214cfdda9d7SSteve Wise 	return 0;
1215cfdda9d7SSteve Wise nomem:
1216cfdda9d7SSteve Wise 	return -1;
1217cfdda9d7SSteve Wise }
1218cfdda9d7SSteve Wise 
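/*
 * LLD state-change hook.  On CXGB4_STATE_UP the c4iw_dev is allocated and
 * ib device registration is deferred to reg_workq; DOWN and DETACH tear the
 * device down; a fatal error additionally raises IB_EVENT_DEVICE_FATAL
 * before removing the device.
 */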
1219cfdda9d7SSteve Wise static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
1220cfdda9d7SSteve Wise {
12212f25e9a5SSteve Wise 	struct uld_ctx *ctx = handle;
12221c01c538SSteve Wise 
1223548ddb19SBharat Potnuri 	pr_debug("new_state %u\n", new_state);
12241c01c538SSteve Wise 	switch (new_state) {
12251c01c538SSteve Wise 	case CXGB4_STATE_UP:
1226700456bdSJoe Perches 		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
12272f25e9a5SSteve Wise 		if (!ctx->dev) {
12282f25e9a5SSteve Wise 			ctx->dev = c4iw_alloc(&ctx->lldi);
12299efe10a1SSteve Wise 			if (IS_ERR(ctx->dev)) {
1230700456bdSJoe Perches 				pr_err("%s: initialization failed: %ld\n",
12319efe10a1SSteve Wise 				       pci_name(ctx->lldi.pdev),
12329efe10a1SSteve Wise 				       PTR_ERR(ctx->dev));
12339efe10a1SSteve Wise 				ctx->dev = NULL;
12349efe10a1SSteve Wise 				break;
12359efe10a1SSteve Wise 			}
12361c8f1da5SBharat Potnuri 
12371c8f1da5SBharat Potnuri 			INIT_WORK(&ctx->reg_work, c4iw_register_device);
12381c8f1da5SBharat Potnuri 			queue_work(reg_workq, &ctx->reg_work);
12391c01c538SSteve Wise 		}
12401c01c538SSteve Wise 		break;
12411c01c538SSteve Wise 	case CXGB4_STATE_DOWN:
1242700456bdSJoe Perches 		pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
12432f25e9a5SSteve Wise 		if (ctx->dev)
12442f25e9a5SSteve Wise 			c4iw_remove(ctx);
12451c01c538SSteve Wise 		break;
12468b7372c1SGanesh Goudar 	case CXGB4_STATE_FATAL_ERROR:
12471c01c538SSteve Wise 	case CXGB4_STATE_START_RECOVERY:
1248700456bdSJoe Perches 		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
12492f25e9a5SSteve Wise 		if (ctx->dev) {
125034d56893SLeon Romanovsky 			struct ib_event event = {};
1251767fbe81SSteve Wise 
12522f25e9a5SSteve Wise 			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
1253767fbe81SSteve Wise 			event.event  = IB_EVENT_DEVICE_FATAL;
12542f25e9a5SSteve Wise 			event.device = &ctx->dev->ibdev;
1255767fbe81SSteve Wise 			ib_dispatch_event(&event);
12562f25e9a5SSteve Wise 			c4iw_remove(ctx);
1257767fbe81SSteve Wise 		}
12581c01c538SSteve Wise 		break;
12591c01c538SSteve Wise 	case CXGB4_STATE_DETACH:
1260700456bdSJoe Perches 		pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
12612f25e9a5SSteve Wise 		if (ctx->dev)
12622f25e9a5SSteve Wise 			c4iw_remove(ctx);
12631c01c538SSteve Wise 		break;
12641c01c538SSteve Wise 	}
1265cfdda9d7SSteve Wise 	return 0;
1266cfdda9d7SSteve Wise }
1267cfdda9d7SSteve Wise 
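/*
 * Doorbell flow control: on a DB_FULL notification mark the device STOPPED
 * and quiesce user doorbell rings, either by disabling each QP's WQ
 * doorbells (when the shared status page is not available) or by setting
 * db_off in the status page that user space consults.
 */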
12682c974781SVipul Pandya static void stop_queues(struct uld_ctx *ctx)
12692c974781SVipul Pandya {
12702f431291SMatthew Wilcox 	struct c4iw_qp *qp;
12712f431291SMatthew Wilcox 	unsigned long index, flags;
127205eb2389SSteve Wise 
12732f431291SMatthew Wilcox 	xa_lock_irqsave(&ctx->dev->qps, flags);
1274422eea0aSVipul Pandya 	ctx->dev->rdev.stats.db_state_transitions++;
127505eb2389SSteve Wise 	ctx->dev->db_state = STOPPED;
12762f431291SMatthew Wilcox 	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
12772f431291SMatthew Wilcox 		xa_for_each(&ctx->dev->qps, index, qp)
12782f431291SMatthew Wilcox 			t4_disable_wq_db(&qp->wq);
12792f431291SMatthew Wilcox 	} else {
128005eb2389SSteve Wise 		ctx->dev->rdev.status_page->db_off = 1;
12812c974781SVipul Pandya 	}
12822f431291SMatthew Wilcox 	xa_unlock_irqrestore(&ctx->dev->qps, flags);
12832c974781SVipul Pandya }
12842c974781SVipul Pandya 
128505eb2389SSteve Wise static void resume_rc_qp(struct c4iw_qp *qp)
128605eb2389SSteve Wise {
128705eb2389SSteve Wise 	spin_lock(&qp->lock);
1288963cab50SHariprasad S 	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
128905eb2389SSteve Wise 	qp->wq.sq.wq_pidx_inc = 0;
1290963cab50SHariprasad S 	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
129105eb2389SSteve Wise 	qp->wq.rq.wq_pidx_inc = 0;
129205eb2389SSteve Wise 	spin_unlock(&qp->lock);
129305eb2389SSteve Wise }
129405eb2389SSteve Wise 
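/*
 * Ring the doorbells of up to DB_FC_RESUME_SIZE QPs queued on the
 * flow-control list.
 */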
129505eb2389SSteve Wise static void resume_a_chunk(struct uld_ctx *ctx)
129605eb2389SSteve Wise {
129705eb2389SSteve Wise 	int i;
129805eb2389SSteve Wise 	struct c4iw_qp *qp;
129905eb2389SSteve Wise 
130005eb2389SSteve Wise 	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
130105eb2389SSteve Wise 		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
130205eb2389SSteve Wise 				      db_fc_entry);
130305eb2389SSteve Wise 		list_del_init(&qp->db_fc_entry);
130405eb2389SSteve Wise 		resume_rc_qp(qp);
130505eb2389SSteve Wise 		if (list_empty(&ctx->dev->db_fc_list))
130605eb2389SSteve Wise 			break;
130705eb2389SSteve Wise 	}
130805eb2389SSteve Wise }
130905eb2389SSteve Wise 
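/*
 * Leave the STOPPED state via FLOW_CONTROL: drain db_fc_list a chunk at a
 * time while the doorbell FIFO stays below its threshold, sleeping
 * DB_FC_RESUME_DELAY between chunks, then re-enable doorbells and return
 * to NORMAL.
 */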
13102c974781SVipul Pandya static void resume_queues(struct uld_ctx *ctx)
13112c974781SVipul Pandya {
13122f431291SMatthew Wilcox 	xa_lock_irq(&ctx->dev->qps);
131305eb2389SSteve Wise 	if (ctx->dev->db_state != STOPPED)
131405eb2389SSteve Wise 		goto out;
131505eb2389SSteve Wise 	ctx->dev->db_state = FLOW_CONTROL;
131605eb2389SSteve Wise 	while (1) {
131705eb2389SSteve Wise 		if (list_empty(&ctx->dev->db_fc_list)) {
13182f431291SMatthew Wilcox 			struct c4iw_qp *qp;
13192f431291SMatthew Wilcox 			unsigned long index;
13202f431291SMatthew Wilcox 
132105eb2389SSteve Wise 			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
1322422eea0aSVipul Pandya 			ctx->dev->db_state = NORMAL;
1323422eea0aSVipul Pandya 			ctx->dev->rdev.stats.db_state_transitions++;
132405eb2389SSteve Wise 			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
13252f431291SMatthew Wilcox 				xa_for_each(&ctx->dev->qps, index, qp)
13262f431291SMatthew Wilcox 					t4_enable_wq_db(&qp->wq);
132705eb2389SSteve Wise 			} else {
132805eb2389SSteve Wise 				ctx->dev->rdev.status_page->db_off = 0;
1329422eea0aSVipul Pandya 			}
133005eb2389SSteve Wise 			break;
133105eb2389SSteve Wise 		} else {
133205eb2389SSteve Wise 			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
133305eb2389SSteve Wise 			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
133405eb2389SSteve Wise 			       DB_FC_DRAIN_THRESH)) {
133505eb2389SSteve Wise 				resume_a_chunk(ctx);
133605eb2389SSteve Wise 			}
133705eb2389SSteve Wise 			if (!list_empty(&ctx->dev->db_fc_list)) {
13382f431291SMatthew Wilcox 				xa_unlock_irq(&ctx->dev->qps);
133905eb2389SSteve Wise 				if (DB_FC_RESUME_DELAY) {
134005eb2389SSteve Wise 					set_current_state(TASK_UNINTERRUPTIBLE);
134105eb2389SSteve Wise 					schedule_timeout(DB_FC_RESUME_DELAY);
134205eb2389SSteve Wise 				}
13432f431291SMatthew Wilcox 				xa_lock_irq(&ctx->dev->qps);
134405eb2389SSteve Wise 				if (ctx->dev->db_state != FLOW_CONTROL)
134505eb2389SSteve Wise 					break;
134605eb2389SSteve Wise 			}
134705eb2389SSteve Wise 		}
134805eb2389SSteve Wise 	}
134905eb2389SSteve Wise out:
135005eb2389SSteve Wise 	if (ctx->dev->db_state != NORMAL)
135105eb2389SSteve Wise 		ctx->dev->rdev.stats.db_fc_interruptions++;
13522f431291SMatthew Wilcox 	xa_unlock_irq(&ctx->dev->qps);
1353422eea0aSVipul Pandya }
1354422eea0aSVipul Pandya 
1355422eea0aSVipul Pandya struct qp_list {
1356422eea0aSVipul Pandya 	unsigned idx;
1357422eea0aSVipul Pandya 	struct c4iw_qp **qps;
1358422eea0aSVipul Pandya };
1359422eea0aSVipul Pandya 
136005eb2389SSteve Wise static void deref_qps(struct qp_list *qp_list)
1361422eea0aSVipul Pandya {
1362422eea0aSVipul Pandya 	int idx;
1363422eea0aSVipul Pandya 
136405eb2389SSteve Wise 	for (idx = 0; idx < qp_list->idx; idx++)
136505eb2389SSteve Wise 		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
1366422eea0aSVipul Pandya }
1367422eea0aSVipul Pandya 
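/*
 * For each captured QP, push the host's SQ/RQ producer indices back into
 * the SGE with cxgb4_sync_txq_pidx(), then wait for the doorbell FIFO to
 * drain before moving on to the next QP.
 */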
1368422eea0aSVipul Pandya static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
1369422eea0aSVipul Pandya {
1370422eea0aSVipul Pandya 	int idx;
1371422eea0aSVipul Pandya 	int ret;
1372422eea0aSVipul Pandya 
1373422eea0aSVipul Pandya 	for (idx = 0; idx < qp_list->idx; idx++) {
1374422eea0aSVipul Pandya 		struct c4iw_qp *qp = qp_list->qps[idx];
1375422eea0aSVipul Pandya 
13762f431291SMatthew Wilcox 		xa_lock_irq(&qp->rhp->qps);
137705eb2389SSteve Wise 		spin_lock(&qp->lock);
1378422eea0aSVipul Pandya 		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
1379422eea0aSVipul Pandya 					  qp->wq.sq.qid,
1380422eea0aSVipul Pandya 					  t4_sq_host_wq_pidx(&qp->wq),
1381422eea0aSVipul Pandya 					  t4_sq_wq_size(&qp->wq));
1382422eea0aSVipul Pandya 		if (ret) {
1383700456bdSJoe Perches 			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
1384422eea0aSVipul Pandya 			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
138505eb2389SSteve Wise 			spin_unlock(&qp->lock);
13862f431291SMatthew Wilcox 			xa_unlock_irq(&qp->rhp->qps);
1387422eea0aSVipul Pandya 			return;
1388422eea0aSVipul Pandya 		}
138905eb2389SSteve Wise 		qp->wq.sq.wq_pidx_inc = 0;
1390422eea0aSVipul Pandya 
1391422eea0aSVipul Pandya 		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
1392422eea0aSVipul Pandya 					  qp->wq.rq.qid,
1393422eea0aSVipul Pandya 					  t4_rq_host_wq_pidx(&qp->wq),
1394422eea0aSVipul Pandya 					  t4_rq_wq_size(&qp->wq));
1395422eea0aSVipul Pandya 
1396422eea0aSVipul Pandya 		if (ret) {
1397700456bdSJoe Perches 			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
1398422eea0aSVipul Pandya 			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
139905eb2389SSteve Wise 			spin_unlock(&qp->lock);
14002f431291SMatthew Wilcox 			xa_unlock_irq(&qp->rhp->qps);
1401422eea0aSVipul Pandya 			return;
1402422eea0aSVipul Pandya 		}
140305eb2389SSteve Wise 		qp->wq.rq.wq_pidx_inc = 0;
140405eb2389SSteve Wise 		spin_unlock(&qp->lock);
14052f431291SMatthew Wilcox 		xa_unlock_irq(&qp->rhp->qps);
1406422eea0aSVipul Pandya 
1407422eea0aSVipul Pandya 		/* Wait for the dbfifo to drain */
1408422eea0aSVipul Pandya 		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
1409422eea0aSVipul Pandya 			set_current_state(TASK_UNINTERRUPTIBLE);
1410422eea0aSVipul Pandya 			schedule_timeout(usecs_to_jiffies(10));
1411422eea0aSVipul Pandya 		}
1412422eea0aSVipul Pandya 	}
1413422eea0aSVipul Pandya }
1414422eea0aSVipul Pandya 
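/*
 * DB_DROP recovery: flush the SGE EQ cache, snapshot every QP under the
 * xarray lock (taking references so none can be freed), resync their
 * doorbell state outside the lock, and leave the device STOPPED until the
 * following DB_EMPTY notification resumes it.
 */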
1415422eea0aSVipul Pandya static void recover_queues(struct uld_ctx *ctx)
1416422eea0aSVipul Pandya {
14172f431291SMatthew Wilcox 	struct c4iw_qp *qp;
14182f431291SMatthew Wilcox 	unsigned long index;
1419422eea0aSVipul Pandya 	int count = 0;
1420422eea0aSVipul Pandya 	struct qp_list qp_list;
1421422eea0aSVipul Pandya 	int ret;
1422422eea0aSVipul Pandya 
1423422eea0aSVipul Pandya 	/* slow everybody down */
1424422eea0aSVipul Pandya 	set_current_state(TASK_UNINTERRUPTIBLE);
1425422eea0aSVipul Pandya 	schedule_timeout(usecs_to_jiffies(1000));
1426422eea0aSVipul Pandya 
1427422eea0aSVipul Pandya 	/* flush the SGE contexts */
1428422eea0aSVipul Pandya 	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
1429422eea0aSVipul Pandya 	if (ret) {
1430700456bdSJoe Perches 		pr_err("%s: Fatal error - DB overflow recovery failed\n",
1431422eea0aSVipul Pandya 		       pci_name(ctx->lldi.pdev));
143205eb2389SSteve Wise 		return;
1433422eea0aSVipul Pandya 	}
1434422eea0aSVipul Pandya 
1435422eea0aSVipul Pandya 	/* Count active queues so we can build a list of queues to recover */
14362f431291SMatthew Wilcox 	xa_lock_irq(&ctx->dev->qps);
143705eb2389SSteve Wise 	WARN_ON(ctx->dev->db_state != STOPPED);
143805eb2389SSteve Wise 	ctx->dev->db_state = RECOVERY;
14392f431291SMatthew Wilcox 	xa_for_each(&ctx->dev->qps, index, qp)
14402f431291SMatthew Wilcox 		count++;
1441422eea0aSVipul Pandya 
14426396bb22SKees Cook 	qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
1443422eea0aSVipul Pandya 	if (!qp_list.qps) {
14442f431291SMatthew Wilcox 		xa_unlock_irq(&ctx->dev->qps);
144505eb2389SSteve Wise 		return;
1446422eea0aSVipul Pandya 	}
1447422eea0aSVipul Pandya 	qp_list.idx = 0;
1448422eea0aSVipul Pandya 
1449422eea0aSVipul Pandya 	/* add and ref each qp so it doesn't get freed */
14502f431291SMatthew Wilcox 	xa_for_each(&ctx->dev->qps, index, qp) {
14512f431291SMatthew Wilcox 		c4iw_qp_add_ref(&qp->ibqp);
14522f431291SMatthew Wilcox 		qp_list.qps[qp_list.idx++] = qp;
14532f431291SMatthew Wilcox 	}
1454422eea0aSVipul Pandya 
14552f431291SMatthew Wilcox 	xa_unlock_irq(&ctx->dev->qps);
1456422eea0aSVipul Pandya 
1457422eea0aSVipul Pandya 	/* now traverse the list in a safe context to recover the db state */
1458422eea0aSVipul Pandya 	recover_lost_dbs(ctx, &qp_list);
1459422eea0aSVipul Pandya 
1460422eea0aSVipul Pandya 	/* we're almost done!  deref the qps and clean up */
146105eb2389SSteve Wise 	deref_qps(&qp_list);
1462422eea0aSVipul Pandya 	kfree(qp_list.qps);
1463422eea0aSVipul Pandya 
14642f431291SMatthew Wilcox 	xa_lock_irq(&ctx->dev->qps);
146505eb2389SSteve Wise 	WARN_ON(ctx->dev->db_state != RECOVERY);
146605eb2389SSteve Wise 	ctx->dev->db_state = STOPPED;
14672f431291SMatthew Wilcox 	xa_unlock_irq(&ctx->dev->qps);
14682c974781SVipul Pandya }
14692c974781SVipul Pandya 
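/*
 * LLD control hook for doorbell congestion events: stop queues on DB_FULL,
 * resume them on DB_EMPTY, and run full recovery on DB_DROP, bumping the
 * corresponding stats counters.
 */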
14702c974781SVipul Pandya static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
14712c974781SVipul Pandya {
14722c974781SVipul Pandya 	struct uld_ctx *ctx = handle;
14732c974781SVipul Pandya 
14742c974781SVipul Pandya 	switch (control) {
14752c974781SVipul Pandya 	case CXGB4_CONTROL_DB_FULL:
14762c974781SVipul Pandya 		stop_queues(ctx);
14772c974781SVipul Pandya 		ctx->dev->rdev.stats.db_full++;
14782c974781SVipul Pandya 		break;
14792c974781SVipul Pandya 	case CXGB4_CONTROL_DB_EMPTY:
14802c974781SVipul Pandya 		resume_queues(ctx);
14812c974781SVipul Pandya 		mutex_lock(&ctx->dev->rdev.stats.lock);
14822c974781SVipul Pandya 		ctx->dev->rdev.stats.db_empty++;
14832c974781SVipul Pandya 		mutex_unlock(&ctx->dev->rdev.stats.lock);
14842c974781SVipul Pandya 		break;
14852c974781SVipul Pandya 	case CXGB4_CONTROL_DB_DROP:
1486422eea0aSVipul Pandya 		recover_queues(ctx);
14872c974781SVipul Pandya 		mutex_lock(&ctx->dev->rdev.stats.lock);
14882c974781SVipul Pandya 		ctx->dev->rdev.stats.db_drop++;
14892c974781SVipul Pandya 		mutex_unlock(&ctx->dev->rdev.stats.lock);
14902c974781SVipul Pandya 		break;
14912c974781SVipul Pandya 	default:
1492700456bdSJoe Perches 		pr_warn("%s: unknown control cmd %u\n",
14932c974781SVipul Pandya 			pci_name(ctx->lldi.pdev), control);
14942c974781SVipul Pandya 		break;
14952c974781SVipul Pandya 	}
14962c974781SVipul Pandya 	return 0;
14972c974781SVipul Pandya }
14982c974781SVipul Pandya 
1499cfdda9d7SSteve Wise static struct cxgb4_uld_info c4iw_uld_info = {
1500cfdda9d7SSteve Wise 	.name = DRV_NAME,
15010fbc81b3SHariprasad Shenai 	.nrxq = MAX_ULD_QSETS,
1502ab677ff4SHariprasad Shenai 	.ntxq = MAX_ULD_QSETS,
15030fbc81b3SHariprasad Shenai 	.rxq_size = 511,
15040fbc81b3SHariprasad Shenai 	.ciq = true,
15050fbc81b3SHariprasad Shenai 	.lro = false,
1506cfdda9d7SSteve Wise 	.add = c4iw_uld_add,
1507cfdda9d7SSteve Wise 	.rx_handler = c4iw_uld_rx_handler,
1508cfdda9d7SSteve Wise 	.state_change = c4iw_uld_state_change,
15092c974781SVipul Pandya 	.control = c4iw_uld_control,
1510cfdda9d7SSteve Wise };
1511cfdda9d7SSteve Wise 
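/*
 * c4iw_wr_wait objects are reference counted; both the issuing path and the
 * completion path hold a kref, presumably so a late completion cannot touch
 * a wait object the issuer has already released.
 */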
15122015f26cSSteve Wise void _c4iw_free_wr_wait(struct kref *kref)
15132015f26cSSteve Wise {
15142015f26cSSteve Wise 	struct c4iw_wr_wait *wr_waitp;
15152015f26cSSteve Wise 
15162015f26cSSteve Wise 	wr_waitp = container_of(kref, struct c4iw_wr_wait, kref);
15172015f26cSSteve Wise 	pr_debug("Free wr_wait %p\n", wr_waitp);
15182015f26cSSteve Wise 	kfree(wr_waitp);
15192015f26cSSteve Wise }
15202015f26cSSteve Wise 
15212015f26cSSteve Wise struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
15222015f26cSSteve Wise {
15232015f26cSSteve Wise 	struct c4iw_wr_wait *wr_waitp;
15242015f26cSSteve Wise 
15252015f26cSSteve Wise 	wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
15262015f26cSSteve Wise 	if (wr_waitp) {
15272015f26cSSteve Wise 		kref_init(&wr_waitp->kref);
15282015f26cSSteve Wise 		pr_debug("wr_wait %p\n", wr_waitp);
15292015f26cSSteve Wise 	}
15302015f26cSSteve Wise 	return wr_waitp;
15312015f26cSSteve Wise }
15322015f26cSSteve Wise 
1533cfdda9d7SSteve Wise static int __init c4iw_init_module(void)
1534cfdda9d7SSteve Wise {
1535cfdda9d7SSteve Wise 	int err;
1536cfdda9d7SSteve Wise 
1537cfdda9d7SSteve Wise 	err = c4iw_cm_init();
1538cfdda9d7SSteve Wise 	if (err)
1539cfdda9d7SSteve Wise 		return err;
1540cfdda9d7SSteve Wise 
1541cfdda9d7SSteve Wise 	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
1542cfdda9d7SSteve Wise 
15431c8f1da5SBharat Potnuri 	reg_workq = create_singlethread_workqueue("Register_iWARP_device");
15441c8f1da5SBharat Potnuri 	if (!reg_workq) {
15451c8f1da5SBharat Potnuri 		pr_err("Failed creating workqueue to register iwarp device\n");
15461c8f1da5SBharat Potnuri 		return -ENOMEM;
15471c8f1da5SBharat Potnuri 	}
15481c8f1da5SBharat Potnuri 
1549cfdda9d7SSteve Wise 	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
1550cfdda9d7SSteve Wise 
1551cfdda9d7SSteve Wise 	return 0;
1552cfdda9d7SSteve Wise }
1553cfdda9d7SSteve Wise 
1554cfdda9d7SSteve Wise static void __exit c4iw_exit_module(void)
1555cfdda9d7SSteve Wise {
15562f25e9a5SSteve Wise 	struct uld_ctx *ctx, *tmp;
1557cfdda9d7SSteve Wise 
1558cfdda9d7SSteve Wise 	mutex_lock(&dev_mutex);
15592f25e9a5SSteve Wise 	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
15602f25e9a5SSteve Wise 		if (ctx->dev)
15612f25e9a5SSteve Wise 			c4iw_remove(ctx);
15622f25e9a5SSteve Wise 		kfree(ctx);
1563cfdda9d7SSteve Wise 	}
1564cfdda9d7SSteve Wise 	mutex_unlock(&dev_mutex);
15651c8f1da5SBharat Potnuri 	destroy_workqueue(reg_workq);
1566fd388ce6SSteve Wise 	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
1567cfdda9d7SSteve Wise 	c4iw_cm_term();
1568cfdda9d7SSteve Wise 	debugfs_remove_recursive(c4iw_debugfs_root);
1569cfdda9d7SSteve Wise }
1570cfdda9d7SSteve Wise 
1571cfdda9d7SSteve Wise module_init(c4iw_init_module);
1572cfdda9d7SSteve Wise module_exit(c4iw_exit_module);
1573