xref: /openbmc/linux/drivers/infiniband/hw/qib/qib_qp.c (revision af061a64)
1 /*
2  * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3  * All rights reserved.
4  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/err.h>
36 #include <linux/vmalloc.h>
37 #include <linux/jhash.h>
38 
39 #include "qib.h"
40 
41 #define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
42 #define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)
43 
44 static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
45 			      struct qpn_map *map, unsigned off)
46 {
47 	return (map - qpt->map) * BITS_PER_PAGE + off;
48 }
49 
50 static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
51 					struct qpn_map *map, unsigned off,
52 					unsigned n)
53 {
54 	if (qpt->mask) {
55 		off++;
56 		if (((off & qpt->mask) >> 1) >= n)
57 			off = (off | qpt->mask) + 2;
58 	} else
59 		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
60 	return off;
61 }
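/*
 * Editorial note: when qpt->mask is non-zero, the low-order QPN bits
 * appear to select which kernel receive context a packet is steered to,
 * so find_next_offset() skips offsets whose context index
 * ((off & mask) >> 1) would be >= the number of kernel contexts.
 * Illustrative walk-through (the mask value below is hypothetical; the
 * real value comes from dd->qpn_mask):
 *
 *	mask = 0x6, n = 2 kernel contexts, off = 4 on entry
 *	off++               -> 5 (0b101), context index (5 & 0x6) >> 1 = 2 >= n
 *	off = (5 | 0x6) + 2 -> 9 (0b1001), context index 0 again
 */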
62 
63 /*
64  * Convert the AETH credit code into the number of credits.
65  */
66 static u32 credit_table[31] = {
67 	0,                      /* 0 */
68 	1,                      /* 1 */
69 	2,                      /* 2 */
70 	3,                      /* 3 */
71 	4,                      /* 4 */
72 	6,                      /* 5 */
73 	8,                      /* 6 */
74 	12,                     /* 7 */
75 	16,                     /* 8 */
76 	24,                     /* 9 */
77 	32,                     /* A */
78 	48,                     /* B */
79 	64,                     /* C */
80 	96,                     /* D */
81 	128,                    /* E */
82 	192,                    /* F */
83 	256,                    /* 10 */
84 	384,                    /* 11 */
85 	512,                    /* 12 */
86 	768,                    /* 13 */
87 	1024,                   /* 14 */
88 	1536,                   /* 15 */
89 	2048,                   /* 16 */
90 	3072,                   /* 17 */
91 	4096,                   /* 18 */
92 	6144,                   /* 19 */
93 	8192,                   /* 1A */
94 	12288,                  /* 1B */
95 	16384,                  /* 1C */
96 	24576,                  /* 1D */
97 	32768                   /* 1E */
98 };
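/*
 * Editorial note: the 5-bit AETH credit code indexes this table, so the
 * advertised credit count grows roughly geometrically (doubling about
 * every two steps) rather than linearly; e.g. code 0x8 advertises 16
 * RWQEs and code 0xD advertises 96.  The table covers codes 0..0x1E;
 * a separate code, QIB_AETH_CREDIT_INVAL, is used to mean "unlimited
 * credit" (see qib_compute_aeth() and qib_get_credit() below).
 */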
99 
100 static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
101 {
102 	unsigned long page = get_zeroed_page(GFP_KERNEL);
103 
104 	/*
105 	 * Free the page if someone raced with us installing it.
106 	 */
107 
108 	spin_lock(&qpt->lock);
109 	if (map->page)
110 		free_page(page);
111 	else
112 		map->page = (void *)page;
113 	spin_unlock(&qpt->lock);
114 }
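/*
 * Editorial note: get_map_page() allocates the bitmap page outside the
 * lock and resolves any race under qpt->lock: the first caller to
 * install a page wins and a losing caller frees its copy.  Callers
 * therefore re-check map->page after the call, as alloc_qpn() does
 * below.
 */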
115 
116 /*
117  * Allocate the next available QPN or
118  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
119  */
120 static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
121 		     enum ib_qp_type type, u8 port)
122 {
123 	u32 i, offset, max_scan, qpn;
124 	struct qpn_map *map;
125 	u32 ret;
126 
127 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
128 		unsigned n;
129 
130 		ret = type == IB_QPT_GSI;
131 		n = 1 << (ret + 2 * (port - 1));
132 		spin_lock(&qpt->lock);
133 		if (qpt->flags & n)
134 			ret = -EINVAL;
135 		else
136 			qpt->flags |= n;
137 		spin_unlock(&qpt->lock);
138 		goto bail;
139 	}
140 
141 	qpn = qpt->last + 2;
142 	if (qpn >= QPN_MAX)
143 		qpn = 2;
144 	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
145 		qpn = (qpn | qpt->mask) + 2;
146 	offset = qpn & BITS_PER_PAGE_MASK;
147 	map = &qpt->map[qpn / BITS_PER_PAGE];
148 	max_scan = qpt->nmaps - !offset;
149 	for (i = 0;;) {
150 		if (unlikely(!map->page)) {
151 			get_map_page(qpt, map);
152 			if (unlikely(!map->page))
153 				break;
154 		}
155 		do {
156 			if (!test_and_set_bit(offset, map->page)) {
157 				qpt->last = qpn;
158 				ret = qpn;
159 				goto bail;
160 			}
161 			offset = find_next_offset(qpt, map, offset,
162 				dd->n_krcv_queues);
163 			qpn = mk_qpn(qpt, map, offset);
164 			/*
165 			 * This test differs from alloc_pidmap().
166 			 * If find_next_offset() does find a zero
167 			 * bit, we don't need to check for QPN
168 			 * wrapping around past our starting QPN.
169 			 * We just need to be sure we don't loop
170 			 * forever.
171 			 */
172 		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
173 		/*
174 		 * In order to keep the number of pages allocated to a
175 		 * minimum, we scan all the existing pages before increasing
176 		 * the size of the bitmap table.
177 		 */
178 		if (++i > max_scan) {
179 			if (qpt->nmaps == QPNMAP_ENTRIES)
180 				break;
181 			map = &qpt->map[qpt->nmaps++];
182 			offset = 0;
183 		} else if (map < &qpt->map[qpt->nmaps]) {
184 			++map;
185 			offset = 0;
186 		} else {
187 			map = &qpt->map[0];
188 			offset = 2;
189 		}
190 		qpn = mk_qpn(qpt, map, offset);
191 	}
192 
193 	ret = -ENOMEM;
194 
195 bail:
196 	return ret;
197 }
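/*
 * Editorial note on the special-case path above: SMI and GSI QPs always
 * use QPN 0 and 1 respectively, and qpt->flags tracks which of those
 * per-port QPNs are taken, two bits per port.  Worked example: port 2
 * requesting IB_QPT_GSI computes n = 1 << (1 + 2 * (2 - 1)) = 0x8; if
 * that flag bit is already set the call fails with -EINVAL, otherwise
 * the bit is set and QPN 1 is returned.
 */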
198 
199 static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
200 {
201 	struct qpn_map *map;
202 
203 	map = qpt->map + qpn / BITS_PER_PAGE;
204 	if (map->page)
205 		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
206 }
207 
208 static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
209 {
210 	return jhash_1word(qpn, dev->qp_rnd) &
211 		(dev->qp_table_size - 1);
212 }
213 
214 
215 /*
216  * Put the QP into the hash table.
217  * The hash table holds a reference to the QP.
218  */
219 static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
220 {
221 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
222 	unsigned long flags;
223 	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
224 
225 	spin_lock_irqsave(&dev->qpt_lock, flags);
226 	atomic_inc(&qp->refcount);
227 
228 	if (qp->ibqp.qp_num == 0)
229 		rcu_assign_pointer(ibp->qp0, qp);
230 	else if (qp->ibqp.qp_num == 1)
231 		rcu_assign_pointer(ibp->qp1, qp);
232 	else {
233 		qp->next = dev->qp_table[n];
234 		rcu_assign_pointer(dev->qp_table[n], qp);
235 	}
236 
237 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
238 	synchronize_rcu();
239 }
240 
241 /*
242  * Remove the QP from the table so it can't be found asynchronously by
243  * the receive interrupt routine.
244  */
245 static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
246 {
247 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
248 	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
249 	unsigned long flags;
250 
251 	spin_lock_irqsave(&dev->qpt_lock, flags);
252 
253 	if (ibp->qp0 == qp) {
254 		atomic_dec(&qp->refcount);
255 		rcu_assign_pointer(ibp->qp0, NULL);
256 	} else if (ibp->qp1 == qp) {
257 		atomic_dec(&qp->refcount);
258 		rcu_assign_pointer(ibp->qp1, NULL);
259 	} else {
260 		struct qib_qp *q, **qpp;
261 
262 		qpp = &dev->qp_table[n];
263 		for (; (q = *qpp) != NULL; qpp = &q->next)
264 			if (q == qp) {
265 				atomic_dec(&qp->refcount);
266 				rcu_assign_pointer(*qpp, qp->next);
267 				qp->next = NULL;
268 				break;
269 			}
270 	}
271 
272 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
273 	synchronize_rcu();
274 }
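/*
 * Editorial note on the scheme used by insert_qp()/remove_qp(): writers
 * serialize on dev->qpt_lock and publish with rcu_assign_pointer(),
 * then call synchronize_rcu() so no reader can still hold a pointer to
 * a removed QP once the caller proceeds.  Readers (e.g. qib_lookup_qpn()
 * below) only need rcu_read_lock()/rcu_dereference() plus the QP
 * refcount to keep a QP alive after the read-side critical section.
 */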
275 
276 /**
277  * qib_free_all_qps - check for QPs still in use
278  * @dd: the qlogic_ib device
279  *
280  * There should not be any QPs still in use.
281  * Returns the number of QPs still in use.
282  */
283 unsigned qib_free_all_qps(struct qib_devdata *dd)
284 {
285 	struct qib_ibdev *dev = &dd->verbs_dev;
286 	unsigned long flags;
287 	struct qib_qp *qp;
288 	unsigned n, qp_inuse = 0;
289 
290 	for (n = 0; n < dd->num_pports; n++) {
291 		struct qib_ibport *ibp = &dd->pport[n].ibport_data;
292 
293 		if (!qib_mcast_tree_empty(ibp))
294 			qp_inuse++;
295 		rcu_read_lock();
296 		if (rcu_dereference(ibp->qp0))
297 			qp_inuse++;
298 		if (rcu_dereference(ibp->qp1))
299 			qp_inuse++;
300 		rcu_read_unlock();
301 	}
302 
303 	spin_lock_irqsave(&dev->qpt_lock, flags);
304 	for (n = 0; n < dev->qp_table_size; n++) {
305 		qp = dev->qp_table[n];
306 		rcu_assign_pointer(dev->qp_table[n], NULL);
307 
308 		for (; qp; qp = qp->next)
309 			qp_inuse++;
310 	}
311 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
312 	synchronize_rcu();
313 
314 	return qp_inuse;
315 }
316 
317 /**
318  * qib_lookup_qpn - return the QP with the given QPN
319  * @ibp: the IB port
320  * @qpn: the QP number to look up
321  *
322  * The caller is responsible for decrementing the QP reference count
323  * when done.
324  */
325 struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
326 {
327 	struct qib_qp *qp = NULL;
328 
329 	if (unlikely(qpn <= 1)) {
330 		rcu_read_lock();
331 		if (qpn == 0)
332 			qp = rcu_dereference(ibp->qp0);
333 		else
334 			qp = rcu_dereference(ibp->qp1);
335 	} else {
336 		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
337 		unsigned n = qpn_hash(dev, qpn);
338 
339 		rcu_read_lock();
340 		for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next)
341 			if (qp->ibqp.qp_num == qpn)
342 				break;
343 	}
344 	if (qp)
345 		if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
346 			qp = NULL;
347 
348 	rcu_read_unlock();
349 	return qp;
350 }
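/*
 * Editorial sketch of the expected caller pattern (illustrative only;
 * the surrounding code is not from this file):
 *
 *	struct qib_qp *qp = qib_lookup_qpn(ibp, qpn);
 *
 *	if (qp) {
 *		... process the packet for qp ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 *
 * The wake_up() pairs with the wait_event(qp->wait, ...) calls used in
 * qib_modify_qp() and qib_destroy_qp() below when tearing a QP down.
 */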
351 
352 /**
353  * qib_reset_qp - initialize the QP state to the reset state
354  * @qp: the QP to reset
355  * @type: the QP type
356  */
357 static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
358 {
359 	qp->remote_qpn = 0;
360 	qp->qkey = 0;
361 	qp->qp_access_flags = 0;
362 	atomic_set(&qp->s_dma_busy, 0);
363 	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
364 	qp->s_hdrwords = 0;
365 	qp->s_wqe = NULL;
366 	qp->s_draining = 0;
367 	qp->s_next_psn = 0;
368 	qp->s_last_psn = 0;
369 	qp->s_sending_psn = 0;
370 	qp->s_sending_hpsn = 0;
371 	qp->s_psn = 0;
372 	qp->r_psn = 0;
373 	qp->r_msn = 0;
374 	if (type == IB_QPT_RC) {
375 		qp->s_state = IB_OPCODE_RC_SEND_LAST;
376 		qp->r_state = IB_OPCODE_RC_SEND_LAST;
377 	} else {
378 		qp->s_state = IB_OPCODE_UC_SEND_LAST;
379 		qp->r_state = IB_OPCODE_UC_SEND_LAST;
380 	}
381 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
382 	qp->r_nak_state = 0;
383 	qp->r_aflags = 0;
384 	qp->r_flags = 0;
385 	qp->s_head = 0;
386 	qp->s_tail = 0;
387 	qp->s_cur = 0;
388 	qp->s_acked = 0;
389 	qp->s_last = 0;
390 	qp->s_ssn = 1;
391 	qp->s_lsn = 0;
392 	qp->s_mig_state = IB_MIG_MIGRATED;
393 	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
394 	qp->r_head_ack_queue = 0;
395 	qp->s_tail_ack_queue = 0;
396 	qp->s_num_rd_atomic = 0;
397 	if (qp->r_rq.wq) {
398 		qp->r_rq.wq->head = 0;
399 		qp->r_rq.wq->tail = 0;
400 	}
401 	qp->r_sge.num_sge = 0;
402 }
403 
404 static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
405 {
406 	unsigned n;
407 
408 	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
409 		while (qp->s_rdma_read_sge.num_sge) {
410 			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
411 			if (--qp->s_rdma_read_sge.num_sge)
412 				qp->s_rdma_read_sge.sge =
413 					*qp->s_rdma_read_sge.sg_list++;
414 		}
415 
416 	while (qp->r_sge.num_sge) {
417 		atomic_dec(&qp->r_sge.sge.mr->refcount);
418 		if (--qp->r_sge.num_sge)
419 			qp->r_sge.sge = *qp->r_sge.sg_list++;
420 	}
421 
422 	if (clr_sends) {
423 		while (qp->s_last != qp->s_head) {
424 			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
425 			unsigned i;
426 
427 			for (i = 0; i < wqe->wr.num_sge; i++) {
428 				struct qib_sge *sge = &wqe->sg_list[i];
429 
430 				atomic_dec(&sge->mr->refcount);
431 			}
432 			if (qp->ibqp.qp_type == IB_QPT_UD ||
433 			    qp->ibqp.qp_type == IB_QPT_SMI ||
434 			    qp->ibqp.qp_type == IB_QPT_GSI)
435 				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
436 			if (++qp->s_last >= qp->s_size)
437 				qp->s_last = 0;
438 		}
439 		if (qp->s_rdma_mr) {
440 			atomic_dec(&qp->s_rdma_mr->refcount);
441 			qp->s_rdma_mr = NULL;
442 		}
443 	}
444 
445 	if (qp->ibqp.qp_type != IB_QPT_RC)
446 		return;
447 
448 	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
449 		struct qib_ack_entry *e = &qp->s_ack_queue[n];
450 
451 		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
452 		    e->rdma_sge.mr) {
453 			atomic_dec(&e->rdma_sge.mr->refcount);
454 			e->rdma_sge.mr = NULL;
455 		}
456 	}
457 }
458 
459 /**
460  * qib_error_qp - put a QP into the error state
461  * @qp: the QP to put into the error state
462  * @err: the receive completion error to signal if a RWQE is active
463  *
464  * Flushes both send and receive work queues.
465  * Returns true if last WQE event should be generated.
466  * The QP r_lock and s_lock should be held and interrupts disabled.
467  * If we are already in error state, just return.
468  */
469 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
470 {
471 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
472 	struct ib_wc wc;
473 	int ret = 0;
474 
475 	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
476 		goto bail;
477 
478 	qp->state = IB_QPS_ERR;
479 
480 	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
481 		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
482 		del_timer(&qp->s_timer);
483 	}
484 
485 	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
486 		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
487 
488 	spin_lock(&dev->pending_lock);
489 	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
490 		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
491 		list_del_init(&qp->iowait);
492 	}
493 	spin_unlock(&dev->pending_lock);
494 
495 	if (!(qp->s_flags & QIB_S_BUSY)) {
496 		qp->s_hdrwords = 0;
497 		if (qp->s_rdma_mr) {
498 			atomic_dec(&qp->s_rdma_mr->refcount);
499 			qp->s_rdma_mr = NULL;
500 		}
501 		if (qp->s_tx) {
502 			qib_put_txreq(qp->s_tx);
503 			qp->s_tx = NULL;
504 		}
505 	}
506 
507 	/* Schedule the send work to drain the send work queue. */
508 	if (qp->s_last != qp->s_head)
509 		qib_schedule_send(qp);
510 
511 	clear_mr_refs(qp, 0);
512 
513 	memset(&wc, 0, sizeof(wc));
514 	wc.qp = &qp->ibqp;
515 	wc.opcode = IB_WC_RECV;
516 
517 	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
518 		wc.wr_id = qp->r_wr_id;
519 		wc.status = err;
520 		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
521 	}
522 	wc.status = IB_WC_WR_FLUSH_ERR;
523 
524 	if (qp->r_rq.wq) {
525 		struct qib_rwq *wq;
526 		u32 head;
527 		u32 tail;
528 
529 		spin_lock(&qp->r_rq.lock);
530 
531 		/* sanity check pointers before trusting them */
532 		wq = qp->r_rq.wq;
533 		head = wq->head;
534 		if (head >= qp->r_rq.size)
535 			head = 0;
536 		tail = wq->tail;
537 		if (tail >= qp->r_rq.size)
538 			tail = 0;
539 		while (tail != head) {
540 			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
541 			if (++tail >= qp->r_rq.size)
542 				tail = 0;
543 			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
544 		}
545 		wq->tail = tail;
546 
547 		spin_unlock(&qp->r_rq.lock);
548 	} else if (qp->ibqp.event_handler)
549 		ret = 1;
550 
551 bail:
552 	return ret;
553 }
554 
555 /**
556  * qib_modify_qp - modify the attributes of a queue pair
557  * @ibqp: the queue pair whose attributes we're modifying
558  * @attr: the new attributes
559  * @attr_mask: the mask of attributes to modify
560  * @udata: user data for libibverbs.so
561  *
562  * Returns 0 on success, otherwise returns an errno.
563  */
564 int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
565 		  int attr_mask, struct ib_udata *udata)
566 {
567 	struct qib_ibdev *dev = to_idev(ibqp->device);
568 	struct qib_qp *qp = to_iqp(ibqp);
569 	enum ib_qp_state cur_state, new_state;
570 	struct ib_event ev;
571 	int lastwqe = 0;
572 	int mig = 0;
573 	int ret;
574 	u32 pmtu = 0; /* for gcc warning only */
575 
576 	spin_lock_irq(&qp->r_lock);
577 	spin_lock(&qp->s_lock);
578 
579 	cur_state = attr_mask & IB_QP_CUR_STATE ?
580 		attr->cur_qp_state : qp->state;
581 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
582 
583 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
584 				attr_mask))
585 		goto inval;
586 
587 	if (attr_mask & IB_QP_AV) {
588 		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
589 			goto inval;
590 		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
591 			goto inval;
592 	}
593 
594 	if (attr_mask & IB_QP_ALT_PATH) {
595 		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
596 			goto inval;
597 		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
598 			goto inval;
599 		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
600 			goto inval;
601 	}
602 
603 	if (attr_mask & IB_QP_PKEY_INDEX)
604 		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
605 			goto inval;
606 
607 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
608 		if (attr->min_rnr_timer > 31)
609 			goto inval;
610 
611 	if (attr_mask & IB_QP_PORT)
612 		if (qp->ibqp.qp_type == IB_QPT_SMI ||
613 		    qp->ibqp.qp_type == IB_QPT_GSI ||
614 		    attr->port_num == 0 ||
615 		    attr->port_num > ibqp->device->phys_port_cnt)
616 			goto inval;
617 
618 	if (attr_mask & IB_QP_DEST_QPN)
619 		if (attr->dest_qp_num > QIB_QPN_MASK)
620 			goto inval;
621 
622 	if (attr_mask & IB_QP_RETRY_CNT)
623 		if (attr->retry_cnt > 7)
624 			goto inval;
625 
626 	if (attr_mask & IB_QP_RNR_RETRY)
627 		if (attr->rnr_retry > 7)
628 			goto inval;
629 
630 	/*
631 	 * Don't allow invalid path_mtu values.  OK to set it greater
632 	 * than the active mtu (or even the max_cap, if we have tuned
633 	 * that to a small mtu).  We'll set qp->path_mtu
634 	 * to the lesser of the requested attribute mtu and the active mtu,
635 	 * for packetizing messages.
636 	 * Note that the QP port has to be set in INIT and MTU in RTR.
637 	 */
638 	if (attr_mask & IB_QP_PATH_MTU) {
639 		struct qib_devdata *dd = dd_from_dev(dev);
640 		int mtu, pidx = qp->port_num - 1;
641 
642 		mtu = ib_mtu_enum_to_int(attr->path_mtu);
643 		if (mtu == -1)
644 			goto inval;
645 		if (mtu > dd->pport[pidx].ibmtu) {
646 			switch (dd->pport[pidx].ibmtu) {
647 			case 4096:
648 				pmtu = IB_MTU_4096;
649 				break;
650 			case 2048:
651 				pmtu = IB_MTU_2048;
652 				break;
653 			case 1024:
654 				pmtu = IB_MTU_1024;
655 				break;
656 			case 512:
657 				pmtu = IB_MTU_512;
658 				break;
659 			case 256:
660 				pmtu = IB_MTU_256;
661 				break;
662 			default:
663 				pmtu = IB_MTU_2048;
664 			}
665 		} else
666 			pmtu = attr->path_mtu;
667 	}
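	/*
	 * Editorial example of the clamp above: if attr->path_mtu is
	 * IB_MTU_4096 but dd->pport[pidx].ibmtu has been tuned down to
	 * 2048, pmtu becomes IB_MTU_2048; a request of IB_MTU_1024 on
	 * the same port is accepted unchanged.
	 */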
668 
669 	if (attr_mask & IB_QP_PATH_MIG_STATE) {
670 		if (attr->path_mig_state == IB_MIG_REARM) {
671 			if (qp->s_mig_state == IB_MIG_ARMED)
672 				goto inval;
673 			if (new_state != IB_QPS_RTS)
674 				goto inval;
675 		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
676 			if (qp->s_mig_state == IB_MIG_REARM)
677 				goto inval;
678 			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
679 				goto inval;
680 			if (qp->s_mig_state == IB_MIG_ARMED)
681 				mig = 1;
682 		} else
683 			goto inval;
684 	}
685 
686 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
687 		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
688 			goto inval;
689 
690 	switch (new_state) {
691 	case IB_QPS_RESET:
692 		if (qp->state != IB_QPS_RESET) {
693 			qp->state = IB_QPS_RESET;
694 			spin_lock(&dev->pending_lock);
695 			if (!list_empty(&qp->iowait))
696 				list_del_init(&qp->iowait);
697 			spin_unlock(&dev->pending_lock);
698 			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
699 			spin_unlock(&qp->s_lock);
700 			spin_unlock_irq(&qp->r_lock);
701 			/* Stop the sending work queue and retry timer */
702 			cancel_work_sync(&qp->s_work);
703 			del_timer_sync(&qp->s_timer);
704 			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
705 			if (qp->s_tx) {
706 				qib_put_txreq(qp->s_tx);
707 				qp->s_tx = NULL;
708 			}
709 			remove_qp(dev, qp);
710 			wait_event(qp->wait, !atomic_read(&qp->refcount));
711 			spin_lock_irq(&qp->r_lock);
712 			spin_lock(&qp->s_lock);
713 			clear_mr_refs(qp, 1);
714 			qib_reset_qp(qp, ibqp->qp_type);
715 		}
716 		break;
717 
718 	case IB_QPS_RTR:
719 		/* Allow event to retrigger if QP set to RTR more than once */
720 		qp->r_flags &= ~QIB_R_COMM_EST;
721 		qp->state = new_state;
722 		break;
723 
724 	case IB_QPS_SQD:
725 		qp->s_draining = qp->s_last != qp->s_cur;
726 		qp->state = new_state;
727 		break;
728 
729 	case IB_QPS_SQE:
730 		if (qp->ibqp.qp_type == IB_QPT_RC)
731 			goto inval;
732 		qp->state = new_state;
733 		break;
734 
735 	case IB_QPS_ERR:
736 		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
737 		break;
738 
739 	default:
740 		qp->state = new_state;
741 		break;
742 	}
743 
744 	if (attr_mask & IB_QP_PKEY_INDEX)
745 		qp->s_pkey_index = attr->pkey_index;
746 
747 	if (attr_mask & IB_QP_PORT)
748 		qp->port_num = attr->port_num;
749 
750 	if (attr_mask & IB_QP_DEST_QPN)
751 		qp->remote_qpn = attr->dest_qp_num;
752 
753 	if (attr_mask & IB_QP_SQ_PSN) {
754 		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
755 		qp->s_psn = qp->s_next_psn;
756 		qp->s_sending_psn = qp->s_next_psn;
757 		qp->s_last_psn = qp->s_next_psn - 1;
758 		qp->s_sending_hpsn = qp->s_last_psn;
759 	}
760 
761 	if (attr_mask & IB_QP_RQ_PSN)
762 		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
763 
764 	if (attr_mask & IB_QP_ACCESS_FLAGS)
765 		qp->qp_access_flags = attr->qp_access_flags;
766 
767 	if (attr_mask & IB_QP_AV) {
768 		qp->remote_ah_attr = attr->ah_attr;
769 		qp->s_srate = attr->ah_attr.static_rate;
770 	}
771 
772 	if (attr_mask & IB_QP_ALT_PATH) {
773 		qp->alt_ah_attr = attr->alt_ah_attr;
774 		qp->s_alt_pkey_index = attr->alt_pkey_index;
775 	}
776 
777 	if (attr_mask & IB_QP_PATH_MIG_STATE) {
778 		qp->s_mig_state = attr->path_mig_state;
779 		if (mig) {
780 			qp->remote_ah_attr = qp->alt_ah_attr;
781 			qp->port_num = qp->alt_ah_attr.port_num;
782 			qp->s_pkey_index = qp->s_alt_pkey_index;
783 		}
784 	}
785 
786 	if (attr_mask & IB_QP_PATH_MTU) {
787 		qp->path_mtu = pmtu;
788 		qp->pmtu = ib_mtu_enum_to_int(pmtu);
789 	}
790 
791 	if (attr_mask & IB_QP_RETRY_CNT) {
792 		qp->s_retry_cnt = attr->retry_cnt;
793 		qp->s_retry = attr->retry_cnt;
794 	}
795 
796 	if (attr_mask & IB_QP_RNR_RETRY) {
797 		qp->s_rnr_retry_cnt = attr->rnr_retry;
798 		qp->s_rnr_retry = attr->rnr_retry;
799 	}
800 
801 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
802 		qp->r_min_rnr_timer = attr->min_rnr_timer;
803 
804 	if (attr_mask & IB_QP_TIMEOUT)
805 		qp->timeout = attr->timeout;
806 
807 	if (attr_mask & IB_QP_QKEY)
808 		qp->qkey = attr->qkey;
809 
810 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
811 		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
812 
813 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
814 		qp->s_max_rd_atomic = attr->max_rd_atomic;
815 
816 	spin_unlock(&qp->s_lock);
817 	spin_unlock_irq(&qp->r_lock);
818 
819 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
820 		insert_qp(dev, qp);
821 
822 	if (lastwqe) {
823 		ev.device = qp->ibqp.device;
824 		ev.element.qp = &qp->ibqp;
825 		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
826 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
827 	}
828 	if (mig) {
829 		ev.device = qp->ibqp.device;
830 		ev.element.qp = &qp->ibqp;
831 		ev.event = IB_EVENT_PATH_MIG;
832 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
833 	}
834 	ret = 0;
835 	goto bail;
836 
837 inval:
838 	spin_unlock(&qp->s_lock);
839 	spin_unlock_irq(&qp->r_lock);
840 	ret = -EINVAL;
841 
842 bail:
843 	return ret;
844 }
845 
846 int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
847 		 int attr_mask, struct ib_qp_init_attr *init_attr)
848 {
849 	struct qib_qp *qp = to_iqp(ibqp);
850 
851 	attr->qp_state = qp->state;
852 	attr->cur_qp_state = attr->qp_state;
853 	attr->path_mtu = qp->path_mtu;
854 	attr->path_mig_state = qp->s_mig_state;
855 	attr->qkey = qp->qkey;
856 	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
857 	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
858 	attr->dest_qp_num = qp->remote_qpn;
859 	attr->qp_access_flags = qp->qp_access_flags;
860 	attr->cap.max_send_wr = qp->s_size - 1;
861 	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
862 	attr->cap.max_send_sge = qp->s_max_sge;
863 	attr->cap.max_recv_sge = qp->r_rq.max_sge;
864 	attr->cap.max_inline_data = 0;
865 	attr->ah_attr = qp->remote_ah_attr;
866 	attr->alt_ah_attr = qp->alt_ah_attr;
867 	attr->pkey_index = qp->s_pkey_index;
868 	attr->alt_pkey_index = qp->s_alt_pkey_index;
869 	attr->en_sqd_async_notify = 0;
870 	attr->sq_draining = qp->s_draining;
871 	attr->max_rd_atomic = qp->s_max_rd_atomic;
872 	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
873 	attr->min_rnr_timer = qp->r_min_rnr_timer;
874 	attr->port_num = qp->port_num;
875 	attr->timeout = qp->timeout;
876 	attr->retry_cnt = qp->s_retry_cnt;
877 	attr->rnr_retry = qp->s_rnr_retry_cnt;
878 	attr->alt_port_num = qp->alt_ah_attr.port_num;
879 	attr->alt_timeout = qp->alt_timeout;
880 
881 	init_attr->event_handler = qp->ibqp.event_handler;
882 	init_attr->qp_context = qp->ibqp.qp_context;
883 	init_attr->send_cq = qp->ibqp.send_cq;
884 	init_attr->recv_cq = qp->ibqp.recv_cq;
885 	init_attr->srq = qp->ibqp.srq;
886 	init_attr->cap = attr->cap;
887 	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
888 		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
889 	else
890 		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
891 	init_attr->qp_type = qp->ibqp.qp_type;
892 	init_attr->port_num = qp->port_num;
893 	return 0;
894 }
895 
896 /**
897  * qib_compute_aeth - compute the AETH (syndrome + MSN)
898  * @qp: the queue pair to compute the AETH for
899  *
900  * Returns the AETH.
901  */
902 __be32 qib_compute_aeth(struct qib_qp *qp)
903 {
904 	u32 aeth = qp->r_msn & QIB_MSN_MASK;
905 
906 	if (qp->ibqp.srq) {
907 		/*
908 		 * Shared receive queues don't generate credits.
909 		 * Set the credit field to the invalid value.
910 		 */
911 		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
912 	} else {
913 		u32 min, max, x;
914 		u32 credits;
915 		struct qib_rwq *wq = qp->r_rq.wq;
916 		u32 head;
917 		u32 tail;
918 
919 		/* sanity check pointers before trusting them */
920 		head = wq->head;
921 		if (head >= qp->r_rq.size)
922 			head = 0;
923 		tail = wq->tail;
924 		if (tail >= qp->r_rq.size)
925 			tail = 0;
926 		/*
927 		 * Compute the number of credits available (RWQEs).
928 		 * XXX Not holding the r_rq.lock here so there is a small
929 		 * chance that the pair of reads are not atomic.
930 		 */
931 		credits = head - tail;
932 		if ((int)credits < 0)
933 			credits += qp->r_rq.size;
934 		/*
935 		 * Binary search the credit table to find the code to
936 		 * use.
937 		 */
938 		min = 0;
939 		max = 31;
940 		for (;;) {
941 			x = (min + max) / 2;
942 			if (credit_table[x] == credits)
943 				break;
944 			if (credit_table[x] > credits)
945 				max = x;
946 			else if (min == x)
947 				break;
948 			else
949 				min = x;
950 		}
951 		aeth |= x << QIB_AETH_CREDIT_SHIFT;
952 	}
953 	return cpu_to_be32(aeth);
954 }
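/*
 * Editorial worked example for the binary search above: with 100 free
 * RWQEs the loop converges on x = 0xD (credit_table[0xD] = 96), i.e.
 * the largest code whose table entry does not exceed the actual credit
 * count; that 5-bit code is then merged into the AETH above the MSN.
 */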
955 
956 /**
957  * qib_create_qp - create a queue pair for a device
958  * @ibpd: the protection domain whose device we create the queue pair for
959  * @init_attr: the attributes of the queue pair
960  * @udata: user data for libibverbs.so
961  *
962  * Returns the queue pair on success, otherwise returns an errno.
963  *
964  * Called by the ib_create_qp() core verbs function.
965  */
966 struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
967 			    struct ib_qp_init_attr *init_attr,
968 			    struct ib_udata *udata)
969 {
970 	struct qib_qp *qp;
971 	int err;
972 	struct qib_swqe *swq = NULL;
973 	struct qib_ibdev *dev;
974 	struct qib_devdata *dd;
975 	size_t sz;
976 	size_t sg_list_sz;
977 	struct ib_qp *ret;
978 
979 	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
980 	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
981 		ret = ERR_PTR(-EINVAL);
982 		goto bail;
983 	}
984 
985 	/* Check receive queue parameters if no SRQ is specified. */
986 	if (!init_attr->srq) {
987 		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
988 		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
989 			ret = ERR_PTR(-EINVAL);
990 			goto bail;
991 		}
992 		if (init_attr->cap.max_send_sge +
993 		    init_attr->cap.max_send_wr +
994 		    init_attr->cap.max_recv_sge +
995 		    init_attr->cap.max_recv_wr == 0) {
996 			ret = ERR_PTR(-EINVAL);
997 			goto bail;
998 		}
999 	}
1000 
1001 	switch (init_attr->qp_type) {
1002 	case IB_QPT_SMI:
1003 	case IB_QPT_GSI:
1004 		if (init_attr->port_num == 0 ||
1005 		    init_attr->port_num > ibpd->device->phys_port_cnt) {
1006 			ret = ERR_PTR(-EINVAL);
1007 			goto bail;
1008 		}
1009 	case IB_QPT_UC:
1010 	case IB_QPT_RC:
1011 	case IB_QPT_UD:
1012 		sz = sizeof(struct qib_sge) *
1013 			init_attr->cap.max_send_sge +
1014 			sizeof(struct qib_swqe);
1015 		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
1016 		if (swq == NULL) {
1017 			ret = ERR_PTR(-ENOMEM);
1018 			goto bail;
1019 		}
1020 		sz = sizeof(*qp);
1021 		sg_list_sz = 0;
1022 		if (init_attr->srq) {
1023 			struct qib_srq *srq = to_isrq(init_attr->srq);
1024 
1025 			if (srq->rq.max_sge > 1)
1026 				sg_list_sz = sizeof(*qp->r_sg_list) *
1027 					(srq->rq.max_sge - 1);
1028 		} else if (init_attr->cap.max_recv_sge > 1)
1029 			sg_list_sz = sizeof(*qp->r_sg_list) *
1030 				(init_attr->cap.max_recv_sge - 1);
1031 		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
1032 		if (!qp) {
1033 			ret = ERR_PTR(-ENOMEM);
1034 			goto bail_swq;
1035 		}
1036 		RCU_INIT_POINTER(qp->next, NULL);
1037 		if (init_attr->srq)
1038 			sz = 0;
1039 		else {
1040 			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1041 			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1042 			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1043 				sizeof(struct qib_rwqe);
1044 			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
1045 						   qp->r_rq.size * sz);
1046 			if (!qp->r_rq.wq) {
1047 				ret = ERR_PTR(-ENOMEM);
1048 				goto bail_qp;
1049 			}
1050 		}
1051 
1052 		/*
1053 		 * ib_create_qp() will initialize qp->ibqp
1054 		 * except for qp->ibqp.qp_num.
1055 		 */
1056 		spin_lock_init(&qp->r_lock);
1057 		spin_lock_init(&qp->s_lock);
1058 		spin_lock_init(&qp->r_rq.lock);
1059 		atomic_set(&qp->refcount, 0);
1060 		init_waitqueue_head(&qp->wait);
1061 		init_waitqueue_head(&qp->wait_dma);
1062 		init_timer(&qp->s_timer);
1063 		qp->s_timer.data = (unsigned long)qp;
1064 		INIT_WORK(&qp->s_work, qib_do_send);
1065 		INIT_LIST_HEAD(&qp->iowait);
1066 		INIT_LIST_HEAD(&qp->rspwait);
1067 		qp->state = IB_QPS_RESET;
1068 		qp->s_wq = swq;
1069 		qp->s_size = init_attr->cap.max_send_wr + 1;
1070 		qp->s_max_sge = init_attr->cap.max_send_sge;
1071 		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
1072 			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
1073 		dev = to_idev(ibpd->device);
1074 		dd = dd_from_dev(dev);
1075 		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
1076 				init_attr->port_num);
1077 		if (err < 0) {
1078 			ret = ERR_PTR(err);
1079 			vfree(qp->r_rq.wq);
1080 			goto bail_qp;
1081 		}
1082 		qp->ibqp.qp_num = err;
1083 		qp->port_num = init_attr->port_num;
1084 		qib_reset_qp(qp, init_attr->qp_type);
1085 		break;
1086 
1087 	default:
1088 		/* Don't support raw QPs */
1089 		ret = ERR_PTR(-ENOSYS);
1090 		goto bail;
1091 	}
1092 
1093 	init_attr->cap.max_inline_data = 0;
1094 
1095 	/*
1096 	 * Return the address of the RWQ as the offset to mmap.
1097 	 * See qib_mmap() for details.
1098 	 */
1099 	if (udata && udata->outlen >= sizeof(__u64)) {
1100 		if (!qp->r_rq.wq) {
1101 			__u64 offset = 0;
1102 
1103 			err = ib_copy_to_udata(udata, &offset,
1104 					       sizeof(offset));
1105 			if (err) {
1106 				ret = ERR_PTR(err);
1107 				goto bail_ip;
1108 			}
1109 		} else {
1110 			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
1111 
1112 			qp->ip = qib_create_mmap_info(dev, s,
1113 						      ibpd->uobject->context,
1114 						      qp->r_rq.wq);
1115 			if (!qp->ip) {
1116 				ret = ERR_PTR(-ENOMEM);
1117 				goto bail_ip;
1118 			}
1119 
1120 			err = ib_copy_to_udata(udata, &(qp->ip->offset),
1121 					       sizeof(qp->ip->offset));
1122 			if (err) {
1123 				ret = ERR_PTR(err);
1124 				goto bail_ip;
1125 			}
1126 		}
1127 	}
1128 
1129 	spin_lock(&dev->n_qps_lock);
1130 	if (dev->n_qps_allocated == ib_qib_max_qps) {
1131 		spin_unlock(&dev->n_qps_lock);
1132 		ret = ERR_PTR(-ENOMEM);
1133 		goto bail_ip;
1134 	}
1135 
1136 	dev->n_qps_allocated++;
1137 	spin_unlock(&dev->n_qps_lock);
1138 
1139 	if (qp->ip) {
1140 		spin_lock_irq(&dev->pending_lock);
1141 		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
1142 		spin_unlock_irq(&dev->pending_lock);
1143 	}
1144 
1145 	ret = &qp->ibqp;
1146 	goto bail;
1147 
1148 bail_ip:
1149 	if (qp->ip)
1150 		kref_put(&qp->ip->ref, qib_release_mmap_info);
1151 	else
1152 		vfree(qp->r_rq.wq);
1153 	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
1154 bail_qp:
1155 	kfree(qp);
1156 bail_swq:
1157 	vfree(swq);
1158 bail:
1159 	return ret;
1160 }
1161 
1162 /**
1163  * qib_destroy_qp - destroy a queue pair
1164  * @ibqp: the queue pair to destroy
1165  *
1166  * Returns 0 on success.
1167  *
1168  * Note that this can be called while the QP is actively sending or
1169  * receiving!
1170  */
1171 int qib_destroy_qp(struct ib_qp *ibqp)
1172 {
1173 	struct qib_qp *qp = to_iqp(ibqp);
1174 	struct qib_ibdev *dev = to_idev(ibqp->device);
1175 
1176 	/* Make sure HW and driver activity is stopped. */
1177 	spin_lock_irq(&qp->s_lock);
1178 	if (qp->state != IB_QPS_RESET) {
1179 		qp->state = IB_QPS_RESET;
1180 		spin_lock(&dev->pending_lock);
1181 		if (!list_empty(&qp->iowait))
1182 			list_del_init(&qp->iowait);
1183 		spin_unlock(&dev->pending_lock);
1184 		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
1185 		spin_unlock_irq(&qp->s_lock);
1186 		cancel_work_sync(&qp->s_work);
1187 		del_timer_sync(&qp->s_timer);
1188 		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
1189 		if (qp->s_tx) {
1190 			qib_put_txreq(qp->s_tx);
1191 			qp->s_tx = NULL;
1192 		}
1193 		remove_qp(dev, qp);
1194 		wait_event(qp->wait, !atomic_read(&qp->refcount));
1195 		clear_mr_refs(qp, 1);
1196 	} else
1197 		spin_unlock_irq(&qp->s_lock);
1198 
1199 	/* all users cleaned up; mark the QPN available */
1200 	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
1201 	spin_lock(&dev->n_qps_lock);
1202 	dev->n_qps_allocated--;
1203 	spin_unlock(&dev->n_qps_lock);
1204 
1205 	if (qp->ip)
1206 		kref_put(&qp->ip->ref, qib_release_mmap_info);
1207 	else
1208 		vfree(qp->r_rq.wq);
1209 	vfree(qp->s_wq);
1210 	kfree(qp);
1211 	return 0;
1212 }
1213 
1214 /**
1215  * qib_init_qpn_table - initialize the QP number table for a device
1216  * @qpt: the QPN table
1217  */
1218 void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
1219 {
1220 	spin_lock_init(&qpt->lock);
1221 	qpt->last = 1;          /* start with QPN 2 */
1222 	qpt->nmaps = 1;
1223 	qpt->mask = dd->qpn_mask;
1224 }
1225 
1226 /**
1227  * qib_free_qpn_table - free the QP number table for a device
1228  * @qpt: the QPN table
1229  */
1230 void qib_free_qpn_table(struct qib_qpn_table *qpt)
1231 {
1232 	int i;
1233 
1234 	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
1235 		if (qpt->map[i].page)
1236 			free_page((unsigned long) qpt->map[i].page);
1237 }
1238 
1239 /**
1240  * qib_get_credit - handle a credit update for a QP
1241  * @qp: the QP whose credit state to update
1242  * @aeth: the Acknowledge Extended Transport Header
1243  *
1244  * The QP s_lock should be held.
1245  */
1246 void qib_get_credit(struct qib_qp *qp, u32 aeth)
1247 {
1248 	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
1249 
1250 	/*
1251 	 * If the credit is invalid, we can send
1252 	 * as many packets as we like.  Otherwise, we have to
1253 	 * honor the credit field.
1254 	 */
1255 	if (credit == QIB_AETH_CREDIT_INVAL) {
1256 		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
1257 			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
1258 			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
1259 				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
1260 				qib_schedule_send(qp);
1261 			}
1262 		}
1263 	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
1264 		/* Compute new LSN (i.e., MSN + credit) */
1265 		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
1266 		if (qib_cmp24(credit, qp->s_lsn) > 0) {
1267 			qp->s_lsn = credit;
1268 			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
1269 				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
1270 				qib_schedule_send(qp);
1271 			}
1272 		}
1273 	}
1274 }
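/*
 * Editorial worked example for qib_get_credit(): an AETH with credit
 * code 0x8 and MSN 0x100 yields (0x100 + credit_table[0x8]) &
 * QIB_MSN_MASK = 0x110, so s_lsn may advance to 0x110 and, if the QP
 * was blocked on QIB_S_WAIT_SSN_CREDIT, it is rescheduled.  A credit
 * code of QIB_AETH_CREDIT_INVAL instead marks the QP as having
 * unlimited credit.
 */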
1275