/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

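/*
 * idr_for_each() callback: format one QP's state into the debugfs
 * buffer.  A QP can appear in the idr under both its SQ and RQ qids,
 * so ids that don't match the SQ qid are skipped to print each QP
 * only once.  Returning 1 stops the walk when the buffer is full.
 */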
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u->%pI4:%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u->%pI6:%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port));
		}
	} else {
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	}
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

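/*
 * Open pattern shared by these debugfs files: walk the idr once under
 * the device lock to count entries and size the buffer, then walk it
 * again to format the text.  If entries were added between the two
 * passes, the dump callback simply stops when the buffer fills up.
 */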
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof(*qpd), GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}

static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id << 8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		pr_info("%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof(*stagd), GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

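/*
 * Names for the device doorbell states; the order must match the
 * numeric values of NORMAL, FLOW_CONTROL, RECOVERY and STOPPED.
 */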
static const char * const db_state_str[] = {
	"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"
};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

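/*
 * Writing anything to the "stats" file resets the max and failure
 * counters, e.g. (illustrative path, assuming debugfs is mounted in
 * the usual place; the per-device directory is named after the PCI
 * device):
 *
 *	echo 1 > /sys/kernel/debug/iw_cxgb4/0000:01:00.4/stats
 */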
static ssize_t stats_clear(struct file *file, const char __user *buf,
		size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI4:%d <-> %pI4:%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI6:%d <-> %pI6:%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 160;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

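/*
 * Create the per-device debugfs files.  i_size is forced to a
 * non-zero value, apparently so that tools which skip zero-length
 * files will still read them; the actual content length is whatever
 * debugfs_read() reports.
 */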
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}

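/*
 * Release the qids cached on a user context.  QP qids are allocated
 * in blocks of (qpmask + 1); only the base qid of a block was taken
 * from the resource table, so only that one is returned.  Any cached
 * CQ qid entries are simply freed.
 */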
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
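	/*
	 * For example (illustrative values): with 4 KB pages and a
	 * udb_density of 2 user doorbells per page, qpshift is
	 * 12 - 1 = 11 and qpmask is 1, so qid N's doorbell is at
	 * offset (N << 11) into the doorbell BAR and consecutive
	 * qids share a page.
	 */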
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (u64)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		pr_err(MOD "error allocating status page\n");
		err = -ENOMEM;
		goto err5;
	}
	return 0;
err5:
	c4iw_ocqp_pool_destroy(rdev);
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	free_page((unsigned long)rdev->status_page);
	c4iw_ocqp_pool_destroy(rdev);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

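/*
 * Allocate and initialize the iWARP device state.  The on-chip queue
 * (OCQP) memory window is mapped write-combined at the end of PCI
 * BAR2: oc_mw_pa backs up a power-of-two-rounded ocq size from the
 * end of the BAR.
 */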
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);

	PDBG("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}
	return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

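/*
 * LLD receive hook.  Three delivery flavors are handled:
 *   - gl == NULL: the CPL is small and fully contained in the
 *     response descriptor itself,
 *   - gl == CXGB4_MSG_AN: an async notification carrying only a qid
 *     for the event handler,
 *   - otherwise: the payload lives in free-list pages; CPL_RX_PKT
 *     frames are rewritten by recv_rx_pkt(), anything else is turned
 *     into an skb with cxgb4_pktgl_to_skb().
 */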
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;
		u32 qid = be32_to_cpu(rc->pldbuflen_qid);

		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, "
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof(event));
			event.event  = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

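/*
 * Doorbell flow control.  When the hardware doorbell FIFO nears
 * overflow the LLD tells us to stop ringing doorbells (DB_FULL).
 * User-mode queues are told to stop via the status page; if the
 * status page is disabled, each QP's doorbell is turned off
 * individually.
 */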
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

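/*
 * Drain the flow-controlled QP list in chunks of DB_FC_RESUME_SIZE,
 * pausing between chunks while the doorbell FIFO is above the drain
 * threshold.  Once the list is empty the device goes back to NORMAL
 * and user doorbells are re-enabled.
 */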
static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

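/*
 * After a doorbell drop, resync each QP's host producer indices with
 * the hardware via the LLD so that lost doorbells are replayed, then
 * give the doorbell FIFO time to drain before the next QP.
 */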
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));

		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof(*qp_list.qps), GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done!  deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}

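/*
 * Control hook from the LLD: dispatch doorbell FIFO events (full,
 * empty, drop) to the flow-control and recovery paths above.
 */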
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);