xref: /openbmc/linux/drivers/infiniband/hw/qib/qib_qp.c (revision 8e4c0666)
/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

/*
 * The mask field that was present in the now-deleted qib_qpn_table is
 * not present in rvt_qpn_table.  Define the same field as qpt_mask here
 * instead of adding a mask field to rvt_qpn_table.
 */
u16 qpt_mask;
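
/*
 * As the checks in find_next_offset() and alloc_qpn() below show, when
 * qpt_mask is non-zero the bits it selects (shifted down by one) encode
 * which kernel receive context a QPN hashes to, and QPN values whose
 * context index would reach past dd->n_krcv_queues are skipped over.
 */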

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}
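
/*
 * Illustration (values hypothetical): with qpt_mask = 0x6 and n = 2
 * kernel contexts, an offset whose context bits decode to 2, such as 4
 * ((4 & 0x6) >> 1 == 2), is out of range, so it is advanced to
 * (4 | 0x6) + 2 = 8, the next offset that decodes to context 0.
 */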

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,                      /* 0 */
	1,                      /* 1 */
	2,                      /* 2 */
	3,                      /* 3 */
	4,                      /* 4 */
	6,                      /* 5 */
	8,                      /* 6 */
	12,                     /* 7 */
	16,                     /* 8 */
	24,                     /* 9 */
	32,                     /* A */
	48,                     /* B */
	64,                     /* C */
	96,                     /* D */
	128,                    /* E */
	192,                    /* F */
	256,                    /* 10 */
	384,                    /* 11 */
	512,                    /* 12 */
	768,                    /* 13 */
	1024,                   /* 14 */
	1536,                   /* 15 */
	2048,                   /* 16 */
	3072,                   /* 17 */
	4096,                   /* 18 */
	6144,                   /* 19 */
	8192,                   /* 1A */
	12288,                  /* 1B */
	16384,                  /* 1C */
	24576,                  /* 1D */
	32768                   /* 1E */
};
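
/*
 * From code 4 upward the table interleaves powers of two with three times
 * the previous power of two (4, 6, 8, 12, 16, 24, ...), so each code step
 * grants roughly 1.4x the credits of the last, topping out at 32768 for
 * code 0x1E.  Code 0x1F is QIB_AETH_CREDIT_INVAL, the "invalid" value
 * used below to advertise unlimited credit.
 */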

static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN, or zero/one for QP type
 * IB_QPT_SMI/IB_QPT_GSI.  Returns the allocated QPN on success,
 * -EINVAL if the SMI/GSI QPN for the port is already in use, or
 * -ENOMEM if no QPN (or bitmap page) is available.
 */
int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
	      enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
				dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
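
/*
 * Note: qib does not call this allocator directly; it is plugged into
 * rdmavt as the driver_f.alloc_qpn hook (the hookup lives in qib_verbs.c),
 * so rdmavt invokes it whenever a new QP needs a number.  QPNs 0 and 1
 * are reserved for the SMI/GSI special QPs handled above, so the scan
 * never hands out a value below 2.
 */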

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * Return: the number of special QPs (QP0/QP1) still in use, summed
 * across ports.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}
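
/*
 * Only the per-port QP0/QP1 pointers are checked here; the general QP
 * table is owned by rdmavt, which does its own accounting and calls this
 * driver hook (presumably via driver_f.free_all_qps) for the special QPs.
 */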

void notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

void notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}
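
/*
 * notify_error_qp() pulls the QP off the DMA iowait list and releases any
 * in-flight send resources, but only while the send engine is idle: if
 * RVT_S_BUSY is set, the send side still owns s_rdma_mr/s_tx and will
 * release them itself.
 */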

static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}
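
/*
 * get_pmtu_from_attr() clamps the requested path MTU to what the port can
 * actually carry: asking for IB_MTU_4096 on a port whose ibmtu is 2048
 * yields IB_MTU_2048.  Unrecognized byte counts fed to mtu_to_enum()
 * likewise fall back to IB_MTU_2048.
 */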

int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here, so there is a small
		 * chance that the pair of reads is not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
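
/*
 * The binary search above settles on the largest credit code whose table
 * entry does not exceed the actual RWQE count: e.g. 100 free RWQEs encode
 * as code 0xD (96 credits), slightly understating what is available.
 */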

void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}
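
/*
 * The returned private struct rides along in qp->priv for the QP's
 * lifetime; qp_priv_free() below undoes exactly these two allocations.
 * rdmavt calls the pair through its qp_priv_alloc/qp_priv_free driver
 * hooks.
 */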

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
}

void quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}
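
/*
 * The wait above pairs with the send DMA completion path (in qib_verbs.c),
 * which drops s_dma_busy and wakes wait_dma once the last descriptor for
 * the QP has completed.
 */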

void flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_get_credit - handle the credit field in an incoming AETH
 * @qp: the QP the AETH was received on
 * @aeth: the Acknowledge Extended Transport Header
 *
 * Updates the QP's send credit limit (s_lsn) and wakes the send engine
 * when fresh credits let it make progress.
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}
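
/*
 * Example: an AETH credit code of 0x8 advertises credit_table[8] = 16
 * RWQEs beyond the acked MSN, so s_lsn advances to MSN + 16 provided that
 * is newer (qib_cmp24() handles the 24-bit wrap) than the current limit.
 */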

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
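
/*
 * qib_qp_iter_next() returns 0 when it has advanced to another QP and 1
 * once the hash table is exhausted.  It walks RCU-protected chains with
 * rcu_dereference(), so the debugfs seq_file code driving the iterator is
 * expected to hold rcu_read_lock() across these calls.
 */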

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif